| // |
| // Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved. |
| // Copyright (c) 2014, Red Hat Inc. All rights reserved. |
| // DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. |
| // |
| // This code is free software; you can redistribute it and/or modify it |
| // under the terms of the GNU General Public License version 2 only, as |
| // published by the Free Software Foundation. |
| // |
| // This code is distributed in the hope that it will be useful, but WITHOUT |
| // ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or |
| // FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
| // version 2 for more details (a copy is included in the LICENSE file that |
| // accompanied this code). |
| // |
| // You should have received a copy of the GNU General Public License version |
| // 2 along with this work; if not, write to the Free Software Foundation, |
| // Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. |
| // |
| // Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA |
| // or visit www.oracle.com if you need additional information or have any |
| // questions. |
| // |
| // |
| |
| // AArch64 Architecture Description File |
| |
| //----------REGISTER DEFINITION BLOCK------------------------------------------ |
| // This information is used by the matcher and the register allocator to |
| // describe individual registers and classes of registers within the target |
// architecture.
| |
| register %{ |
| //----------Architecture Description Register Definitions---------------------- |
| // General Registers |
| // "reg_def" name ( register save type, C convention save type, |
| // ideal register type, encoding ); |
| // Register Save Types: |
| // |
| // NS = No-Save: The register allocator assumes that these registers |
| // can be used without saving upon entry to the method, & |
| // that they do not need to be saved at call sites. |
| // |
| // SOC = Save-On-Call: The register allocator assumes that these registers |
| // can be used without saving upon entry to the method, |
| // but that they must be saved at call sites. |
| // |
| // SOE = Save-On-Entry: The register allocator assumes that these registers |
| // must be saved before using them upon entry to the |
| // method, but they do not need to be saved at call |
| // sites. |
| // |
| // AS = Always-Save: The register allocator assumes that these registers |
| // must be saved before using them upon entry to the |
| // method, & that they must be saved at call sites. |
| // |
| // Ideal Register Type is used to determine how to save & restore a |
| // register. Op_RegI will get spilled with LoadI/StoreI, Op_RegP will get |
| // spilled with LoadP/StoreP. If the register supports both, use Op_RegI. |
| // |
| // The encoding number is the actual bit-pattern placed into the opcodes. |
| |
// We must define the 64 bit int registers in two 32 bit halves, the
// real lower register and a virtual upper half register. The upper
// halves are used by the register allocator but are not actually
// supplied as operands to memory ops.
| // |
// We follow the C1 compiler in making registers
//
//   r0-r7,r10-r26 volatile (caller save)
//   r27-r31 system (no save, no allocate)
//   r8-r9 invisible to the allocator (so we can use them as scratch regs)
//
// as regards Java usage. We don't use any callee save registers
// because this makes it difficult to de-optimise a frame (see comment
// in x86 implementation of Deoptimization::unwind_callee_save_values)
| // |
| |
| // General Registers |
| |
| reg_def R0 ( SOC, SOC, Op_RegI, 0, r0->as_VMReg() ); |
| reg_def R0_H ( SOC, SOC, Op_RegI, 0, r0->as_VMReg()->next() ); |
| reg_def R1 ( SOC, SOC, Op_RegI, 1, r1->as_VMReg() ); |
| reg_def R1_H ( SOC, SOC, Op_RegI, 1, r1->as_VMReg()->next() ); |
| reg_def R2 ( SOC, SOC, Op_RegI, 2, r2->as_VMReg() ); |
| reg_def R2_H ( SOC, SOC, Op_RegI, 2, r2->as_VMReg()->next() ); |
| reg_def R3 ( SOC, SOC, Op_RegI, 3, r3->as_VMReg() ); |
| reg_def R3_H ( SOC, SOC, Op_RegI, 3, r3->as_VMReg()->next() ); |
| reg_def R4 ( SOC, SOC, Op_RegI, 4, r4->as_VMReg() ); |
| reg_def R4_H ( SOC, SOC, Op_RegI, 4, r4->as_VMReg()->next() ); |
| reg_def R5 ( SOC, SOC, Op_RegI, 5, r5->as_VMReg() ); |
| reg_def R5_H ( SOC, SOC, Op_RegI, 5, r5->as_VMReg()->next() ); |
| reg_def R6 ( SOC, SOC, Op_RegI, 6, r6->as_VMReg() ); |
| reg_def R6_H ( SOC, SOC, Op_RegI, 6, r6->as_VMReg()->next() ); |
| reg_def R7 ( SOC, SOC, Op_RegI, 7, r7->as_VMReg() ); |
| reg_def R7_H ( SOC, SOC, Op_RegI, 7, r7->as_VMReg()->next() ); |
| reg_def R10 ( SOC, SOC, Op_RegI, 10, r10->as_VMReg() ); |
| reg_def R10_H ( SOC, SOC, Op_RegI, 10, r10->as_VMReg()->next()); |
| reg_def R11 ( SOC, SOC, Op_RegI, 11, r11->as_VMReg() ); |
| reg_def R11_H ( SOC, SOC, Op_RegI, 11, r11->as_VMReg()->next()); |
| reg_def R12 ( SOC, SOC, Op_RegI, 12, r12->as_VMReg() ); |
| reg_def R12_H ( SOC, SOC, Op_RegI, 12, r12->as_VMReg()->next()); |
| reg_def R13 ( SOC, SOC, Op_RegI, 13, r13->as_VMReg() ); |
| reg_def R13_H ( SOC, SOC, Op_RegI, 13, r13->as_VMReg()->next()); |
| reg_def R14 ( SOC, SOC, Op_RegI, 14, r14->as_VMReg() ); |
| reg_def R14_H ( SOC, SOC, Op_RegI, 14, r14->as_VMReg()->next()); |
| reg_def R15 ( SOC, SOC, Op_RegI, 15, r15->as_VMReg() ); |
| reg_def R15_H ( SOC, SOC, Op_RegI, 15, r15->as_VMReg()->next()); |
| reg_def R16 ( SOC, SOC, Op_RegI, 16, r16->as_VMReg() ); |
| reg_def R16_H ( SOC, SOC, Op_RegI, 16, r16->as_VMReg()->next()); |
| reg_def R17 ( SOC, SOC, Op_RegI, 17, r17->as_VMReg() ); |
| reg_def R17_H ( SOC, SOC, Op_RegI, 17, r17->as_VMReg()->next()); |
| reg_def R18 ( SOC, SOC, Op_RegI, 18, r18->as_VMReg() ); |
| reg_def R18_H ( SOC, SOC, Op_RegI, 18, r18->as_VMReg()->next()); |
| reg_def R19 ( SOC, SOE, Op_RegI, 19, r19->as_VMReg() ); |
| reg_def R19_H ( SOC, SOE, Op_RegI, 19, r19->as_VMReg()->next()); |
reg_def R20 ( SOC, SOE, Op_RegI, 20, r20->as_VMReg() ); // caller sp
| reg_def R20_H ( SOC, SOE, Op_RegI, 20, r20->as_VMReg()->next()); |
| reg_def R21 ( SOC, SOE, Op_RegI, 21, r21->as_VMReg() ); |
| reg_def R21_H ( SOC, SOE, Op_RegI, 21, r21->as_VMReg()->next()); |
| reg_def R22 ( SOC, SOE, Op_RegI, 22, r22->as_VMReg() ); |
| reg_def R22_H ( SOC, SOE, Op_RegI, 22, r22->as_VMReg()->next()); |
| reg_def R23 ( SOC, SOE, Op_RegI, 23, r23->as_VMReg() ); |
| reg_def R23_H ( SOC, SOE, Op_RegI, 23, r23->as_VMReg()->next()); |
| reg_def R24 ( SOC, SOE, Op_RegI, 24, r24->as_VMReg() ); |
| reg_def R24_H ( SOC, SOE, Op_RegI, 24, r24->as_VMReg()->next()); |
| reg_def R25 ( SOC, SOE, Op_RegI, 25, r25->as_VMReg() ); |
| reg_def R25_H ( SOC, SOE, Op_RegI, 25, r25->as_VMReg()->next()); |
| reg_def R26 ( SOC, SOE, Op_RegI, 26, r26->as_VMReg() ); |
| reg_def R26_H ( SOC, SOE, Op_RegI, 26, r26->as_VMReg()->next()); |
| reg_def R27 ( NS, SOE, Op_RegI, 27, r27->as_VMReg() ); // heapbase |
| reg_def R27_H ( NS, SOE, Op_RegI, 27, r27->as_VMReg()->next()); |
| reg_def R28 ( NS, SOE, Op_RegI, 28, r28->as_VMReg() ); // thread |
| reg_def R28_H ( NS, SOE, Op_RegI, 28, r28->as_VMReg()->next()); |
| reg_def R29 ( NS, NS, Op_RegI, 29, r29->as_VMReg() ); // fp |
| reg_def R29_H ( NS, NS, Op_RegI, 29, r29->as_VMReg()->next()); |
| reg_def R30 ( NS, NS, Op_RegI, 30, r30->as_VMReg() ); // lr |
| reg_def R30_H ( NS, NS, Op_RegI, 30, r30->as_VMReg()->next()); |
| reg_def R31 ( NS, NS, Op_RegI, 31, r31_sp->as_VMReg() ); // sp |
| reg_def R31_H ( NS, NS, Op_RegI, 31, r31_sp->as_VMReg()->next()); |
| |
| // ---------------------------- |
| // Float/Double Registers |
| // ---------------------------- |
| |
| // Double Registers |
| |
| // The rules of ADL require that double registers be defined in pairs. |
| // Each pair must be two 32-bit values, but not necessarily a pair of |
| // single float registers. In each pair, ADLC-assigned register numbers |
| // must be adjacent, with the lower number even. Finally, when the |
| // CPU stores such a register pair to memory, the word associated with |
| // the lower ADLC-assigned number must be stored to the lower address. |
| |
// AArch64 has 32 floating-point registers. Each is 128 bits wide and
// can store a vector of single or double precision floating-point
// values: up to 4 * 32 bit floats or 2 * 64 bit doubles. We currently
// only use the first float or double element of the vector.
| |
// For Java use, float registers v0-v15 are always save-on-call
// (whereas the platform ABI treats v8-v15 as callee save). Float
// registers v16-v31 are SOC as per the platform spec.
| |
| reg_def V0 ( SOC, SOC, Op_RegF, 0, v0->as_VMReg() ); |
| reg_def V0_H ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next() ); |
| reg_def V0_J ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(2) ); |
| reg_def V0_K ( SOC, SOC, Op_RegF, 0, v0->as_VMReg()->next(3) ); |
| |
| reg_def V1 ( SOC, SOC, Op_RegF, 1, v1->as_VMReg() ); |
| reg_def V1_H ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next() ); |
| reg_def V1_J ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(2) ); |
| reg_def V1_K ( SOC, SOC, Op_RegF, 1, v1->as_VMReg()->next(3) ); |
| |
| reg_def V2 ( SOC, SOC, Op_RegF, 2, v2->as_VMReg() ); |
| reg_def V2_H ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next() ); |
| reg_def V2_J ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(2) ); |
| reg_def V2_K ( SOC, SOC, Op_RegF, 2, v2->as_VMReg()->next(3) ); |
| |
| reg_def V3 ( SOC, SOC, Op_RegF, 3, v3->as_VMReg() ); |
| reg_def V3_H ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next() ); |
| reg_def V3_J ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(2) ); |
| reg_def V3_K ( SOC, SOC, Op_RegF, 3, v3->as_VMReg()->next(3) ); |
| |
| reg_def V4 ( SOC, SOC, Op_RegF, 4, v4->as_VMReg() ); |
| reg_def V4_H ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next() ); |
| reg_def V4_J ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(2) ); |
| reg_def V4_K ( SOC, SOC, Op_RegF, 4, v4->as_VMReg()->next(3) ); |
| |
| reg_def V5 ( SOC, SOC, Op_RegF, 5, v5->as_VMReg() ); |
| reg_def V5_H ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next() ); |
| reg_def V5_J ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(2) ); |
| reg_def V5_K ( SOC, SOC, Op_RegF, 5, v5->as_VMReg()->next(3) ); |
| |
| reg_def V6 ( SOC, SOC, Op_RegF, 6, v6->as_VMReg() ); |
| reg_def V6_H ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next() ); |
| reg_def V6_J ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(2) ); |
| reg_def V6_K ( SOC, SOC, Op_RegF, 6, v6->as_VMReg()->next(3) ); |
| |
| reg_def V7 ( SOC, SOC, Op_RegF, 7, v7->as_VMReg() ); |
| reg_def V7_H ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next() ); |
| reg_def V7_J ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(2) ); |
| reg_def V7_K ( SOC, SOC, Op_RegF, 7, v7->as_VMReg()->next(3) ); |
| |
| reg_def V8 ( SOC, SOC, Op_RegF, 8, v8->as_VMReg() ); |
| reg_def V8_H ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next() ); |
| reg_def V8_J ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(2) ); |
| reg_def V8_K ( SOC, SOC, Op_RegF, 8, v8->as_VMReg()->next(3) ); |
| |
| reg_def V9 ( SOC, SOC, Op_RegF, 9, v9->as_VMReg() ); |
| reg_def V9_H ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next() ); |
| reg_def V9_J ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(2) ); |
| reg_def V9_K ( SOC, SOC, Op_RegF, 9, v9->as_VMReg()->next(3) ); |
| |
| reg_def V10 ( SOC, SOC, Op_RegF, 10, v10->as_VMReg() ); |
| reg_def V10_H( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next() ); |
| reg_def V10_J( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(2)); |
| reg_def V10_K( SOC, SOC, Op_RegF, 10, v10->as_VMReg()->next(3)); |
| |
| reg_def V11 ( SOC, SOC, Op_RegF, 11, v11->as_VMReg() ); |
| reg_def V11_H( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next() ); |
| reg_def V11_J( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(2)); |
| reg_def V11_K( SOC, SOC, Op_RegF, 11, v11->as_VMReg()->next(3)); |
| |
| reg_def V12 ( SOC, SOC, Op_RegF, 12, v12->as_VMReg() ); |
| reg_def V12_H( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next() ); |
| reg_def V12_J( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(2)); |
| reg_def V12_K( SOC, SOC, Op_RegF, 12, v12->as_VMReg()->next(3)); |
| |
| reg_def V13 ( SOC, SOC, Op_RegF, 13, v13->as_VMReg() ); |
| reg_def V13_H( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next() ); |
| reg_def V13_J( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(2)); |
| reg_def V13_K( SOC, SOC, Op_RegF, 13, v13->as_VMReg()->next(3)); |
| |
| reg_def V14 ( SOC, SOC, Op_RegF, 14, v14->as_VMReg() ); |
| reg_def V14_H( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next() ); |
| reg_def V14_J( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(2)); |
| reg_def V14_K( SOC, SOC, Op_RegF, 14, v14->as_VMReg()->next(3)); |
| |
| reg_def V15 ( SOC, SOC, Op_RegF, 15, v15->as_VMReg() ); |
| reg_def V15_H( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next() ); |
| reg_def V15_J( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(2)); |
| reg_def V15_K( SOC, SOC, Op_RegF, 15, v15->as_VMReg()->next(3)); |
| |
| reg_def V16 ( SOC, SOC, Op_RegF, 16, v16->as_VMReg() ); |
| reg_def V16_H( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next() ); |
| reg_def V16_J( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(2)); |
| reg_def V16_K( SOC, SOC, Op_RegF, 16, v16->as_VMReg()->next(3)); |
| |
| reg_def V17 ( SOC, SOC, Op_RegF, 17, v17->as_VMReg() ); |
| reg_def V17_H( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next() ); |
| reg_def V17_J( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(2)); |
| reg_def V17_K( SOC, SOC, Op_RegF, 17, v17->as_VMReg()->next(3)); |
| |
| reg_def V18 ( SOC, SOC, Op_RegF, 18, v18->as_VMReg() ); |
| reg_def V18_H( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next() ); |
| reg_def V18_J( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(2)); |
| reg_def V18_K( SOC, SOC, Op_RegF, 18, v18->as_VMReg()->next(3)); |
| |
| reg_def V19 ( SOC, SOC, Op_RegF, 19, v19->as_VMReg() ); |
| reg_def V19_H( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next() ); |
| reg_def V19_J( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(2)); |
| reg_def V19_K( SOC, SOC, Op_RegF, 19, v19->as_VMReg()->next(3)); |
| |
| reg_def V20 ( SOC, SOC, Op_RegF, 20, v20->as_VMReg() ); |
| reg_def V20_H( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next() ); |
| reg_def V20_J( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(2)); |
| reg_def V20_K( SOC, SOC, Op_RegF, 20, v20->as_VMReg()->next(3)); |
| |
| reg_def V21 ( SOC, SOC, Op_RegF, 21, v21->as_VMReg() ); |
| reg_def V21_H( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next() ); |
| reg_def V21_J( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(2)); |
| reg_def V21_K( SOC, SOC, Op_RegF, 21, v21->as_VMReg()->next(3)); |
| |
| reg_def V22 ( SOC, SOC, Op_RegF, 22, v22->as_VMReg() ); |
| reg_def V22_H( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next() ); |
| reg_def V22_J( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(2)); |
| reg_def V22_K( SOC, SOC, Op_RegF, 22, v22->as_VMReg()->next(3)); |
| |
| reg_def V23 ( SOC, SOC, Op_RegF, 23, v23->as_VMReg() ); |
| reg_def V23_H( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next() ); |
| reg_def V23_J( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(2)); |
| reg_def V23_K( SOC, SOC, Op_RegF, 23, v23->as_VMReg()->next(3)); |
| |
| reg_def V24 ( SOC, SOC, Op_RegF, 24, v24->as_VMReg() ); |
| reg_def V24_H( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next() ); |
| reg_def V24_J( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(2)); |
| reg_def V24_K( SOC, SOC, Op_RegF, 24, v24->as_VMReg()->next(3)); |
| |
| reg_def V25 ( SOC, SOC, Op_RegF, 25, v25->as_VMReg() ); |
| reg_def V25_H( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next() ); |
| reg_def V25_J( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(2)); |
| reg_def V25_K( SOC, SOC, Op_RegF, 25, v25->as_VMReg()->next(3)); |
| |
| reg_def V26 ( SOC, SOC, Op_RegF, 26, v26->as_VMReg() ); |
| reg_def V26_H( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next() ); |
| reg_def V26_J( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(2)); |
| reg_def V26_K( SOC, SOC, Op_RegF, 26, v26->as_VMReg()->next(3)); |
| |
| reg_def V27 ( SOC, SOC, Op_RegF, 27, v27->as_VMReg() ); |
| reg_def V27_H( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next() ); |
| reg_def V27_J( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(2)); |
| reg_def V27_K( SOC, SOC, Op_RegF, 27, v27->as_VMReg()->next(3)); |
| |
| reg_def V28 ( SOC, SOC, Op_RegF, 28, v28->as_VMReg() ); |
| reg_def V28_H( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next() ); |
| reg_def V28_J( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(2)); |
| reg_def V28_K( SOC, SOC, Op_RegF, 28, v28->as_VMReg()->next(3)); |
| |
| reg_def V29 ( SOC, SOC, Op_RegF, 29, v29->as_VMReg() ); |
| reg_def V29_H( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next() ); |
| reg_def V29_J( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(2)); |
| reg_def V29_K( SOC, SOC, Op_RegF, 29, v29->as_VMReg()->next(3)); |
| |
| reg_def V30 ( SOC, SOC, Op_RegF, 30, v30->as_VMReg() ); |
| reg_def V30_H( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next() ); |
| reg_def V30_J( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(2)); |
| reg_def V30_K( SOC, SOC, Op_RegF, 30, v30->as_VMReg()->next(3)); |
| |
| reg_def V31 ( SOC, SOC, Op_RegF, 31, v31->as_VMReg() ); |
| reg_def V31_H( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next() ); |
| reg_def V31_J( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(2)); |
| reg_def V31_K( SOC, SOC, Op_RegF, 31, v31->as_VMReg()->next(3)); |
| |
| // ---------------------------- |
| // Special Registers |
| // ---------------------------- |
| |
// The AArch64 CPSR status flag register is not directly accessible as
// an instruction operand. The FPSR status flag register is a system
// register which can be written/read using MSR/MRS but again does not
// appear as an operand (a code identifying the FPSR occurs as an
// immediate value in the instruction).
| |
| reg_def RFLAGS(SOC, SOC, 0, 32, VMRegImpl::Bad()); |
| |
| |
| // Specify priority of register selection within phases of register |
| // allocation. Highest priority is first. A useful heuristic is to |
| // give registers a low priority when they are required by machine |
| // instructions, like EAX and EDX on I486, and choose no-save registers |
| // before save-on-call, & save-on-call before save-on-entry. Registers |
| // which participate in fixed calling sequences should come last. |
| // Registers which are used as pairs must fall on an even boundary. |
| |
| alloc_class chunk0( |
| // volatiles |
| R10, R10_H, |
| R11, R11_H, |
| R12, R12_H, |
| R13, R13_H, |
| R14, R14_H, |
| R15, R15_H, |
| R16, R16_H, |
| R17, R17_H, |
| R18, R18_H, |
| |
| // arg registers |
| R0, R0_H, |
| R1, R1_H, |
| R2, R2_H, |
| R3, R3_H, |
| R4, R4_H, |
| R5, R5_H, |
| R6, R6_H, |
| R7, R7_H, |
| |
| // non-volatiles |
| R19, R19_H, |
| R20, R20_H, |
| R21, R21_H, |
| R22, R22_H, |
| R23, R23_H, |
| R24, R24_H, |
| R25, R25_H, |
| R26, R26_H, |
| |
| // non-allocatable registers |
| |
| R27, R27_H, // heapbase |
| R28, R28_H, // thread |
| R29, R29_H, // fp |
| R30, R30_H, // lr |
| R31, R31_H, // sp |
| ); |
| |
| alloc_class chunk1( |
| |
| // no save |
| V16, V16_H, V16_J, V16_K, |
| V17, V17_H, V17_J, V17_K, |
| V18, V18_H, V18_J, V18_K, |
| V19, V19_H, V19_J, V19_K, |
| V20, V20_H, V20_J, V20_K, |
| V21, V21_H, V21_J, V21_K, |
| V22, V22_H, V22_J, V22_K, |
| V23, V23_H, V23_J, V23_K, |
| V24, V24_H, V24_J, V24_K, |
| V25, V25_H, V25_J, V25_K, |
| V26, V26_H, V26_J, V26_K, |
| V27, V27_H, V27_J, V27_K, |
| V28, V28_H, V28_J, V28_K, |
| V29, V29_H, V29_J, V29_K, |
| V30, V30_H, V30_J, V30_K, |
| V31, V31_H, V31_J, V31_K, |
| |
| // arg registers |
| V0, V0_H, V0_J, V0_K, |
| V1, V1_H, V1_J, V1_K, |
| V2, V2_H, V2_J, V2_K, |
| V3, V3_H, V3_J, V3_K, |
| V4, V4_H, V4_J, V4_K, |
| V5, V5_H, V5_J, V5_K, |
| V6, V6_H, V6_J, V6_K, |
| V7, V7_H, V7_J, V7_K, |
| |
| // non-volatiles |
| V8, V8_H, V8_J, V8_K, |
| V9, V9_H, V9_J, V9_K, |
| V10, V10_H, V10_J, V10_K, |
| V11, V11_H, V11_J, V11_K, |
| V12, V12_H, V12_J, V12_K, |
| V13, V13_H, V13_J, V13_K, |
| V14, V14_H, V14_J, V14_K, |
| V15, V15_H, V15_J, V15_K, |
| ); |
| |
| alloc_class chunk2(RFLAGS); |
| |
| //----------Architecture Description Register Classes-------------------------- |
| // Several register classes are automatically defined based upon information in |
| // this architecture description. |
// 1) reg_class inline_cache_reg ( /* as def'd in frame section */ )
// 2) reg_class compiler_method_oop_reg ( /* as def'd in frame section */ )
// 3) reg_class interpreter_method_oop_reg ( /* as def'd in frame section */ )
// 4) reg_class stack_slots( /* one chunk of stack-based "registers" */ )
| // |
| |
| // Class for all 32 bit integer registers -- excludes SP which will |
| // never be used as an integer register |
| reg_class any_reg32( |
| R0, |
| R1, |
| R2, |
| R3, |
| R4, |
| R5, |
| R6, |
| R7, |
| R10, |
| R11, |
| R12, |
| R13, |
| R14, |
| R15, |
| R16, |
| R17, |
| R18, |
| R19, |
| R20, |
| R21, |
| R22, |
| R23, |
| R24, |
| R25, |
| R26, |
| R27, |
| R28, |
| R29, |
| R30 |
| ); |
| |
| // Singleton class for R0 int register |
| reg_class int_r0_reg(R0); |
| |
| // Singleton class for R2 int register |
| reg_class int_r2_reg(R2); |
| |
| // Singleton class for R3 int register |
| reg_class int_r3_reg(R3); |
| |
| // Singleton class for R4 int register |
| reg_class int_r4_reg(R4); |
| |
// Class for all long integer registers (including SP)
| reg_class any_reg( |
| R0, R0_H, |
| R1, R1_H, |
| R2, R2_H, |
| R3, R3_H, |
| R4, R4_H, |
| R5, R5_H, |
| R6, R6_H, |
| R7, R7_H, |
| R10, R10_H, |
| R11, R11_H, |
| R12, R12_H, |
| R13, R13_H, |
| R14, R14_H, |
| R15, R15_H, |
| R16, R16_H, |
| R17, R17_H, |
| R18, R18_H, |
| R19, R19_H, |
| R20, R20_H, |
| R21, R21_H, |
| R22, R22_H, |
| R23, R23_H, |
| R24, R24_H, |
| R25, R25_H, |
| R26, R26_H, |
| R27, R27_H, |
| R28, R28_H, |
| R29, R29_H, |
| R30, R30_H, |
| R31, R31_H |
| ); |
| |
| // Class for all non-special integer registers |
| reg_class no_special_reg32_no_fp( |
| R0, |
| R1, |
| R2, |
| R3, |
| R4, |
| R5, |
| R6, |
| R7, |
| R10, |
| R11, |
| R12, // rmethod |
| R13, |
| R14, |
| R15, |
| R16, |
| R17, |
| R18, |
| R19, |
| R20, |
| R21, |
| R22, |
| R23, |
| R24, |
| R25, |
| R26 |
| /* R27, */ // heapbase |
| /* R28, */ // thread |
| /* R29, */ // fp |
| /* R30, */ // lr |
| /* R31 */ // sp |
| ); |
| |
| reg_class no_special_reg32_with_fp( |
| R0, |
| R1, |
| R2, |
| R3, |
| R4, |
| R5, |
| R6, |
| R7, |
| R10, |
| R11, |
| R12, // rmethod |
| R13, |
| R14, |
| R15, |
| R16, |
| R17, |
| R18, |
| R19, |
| R20, |
| R21, |
| R22, |
| R23, |
| R24, |
| R25, |
| R26 |
| /* R27, */ // heapbase |
| /* R28, */ // thread |
| R29, // fp |
| /* R30, */ // lr |
| /* R31 */ // sp |
| ); |
| |
| reg_class_dynamic no_special_reg32(no_special_reg32_no_fp, no_special_reg32_with_fp, %{ PreserveFramePointer %}); |
| |
| // Class for all non-special long integer registers |
| reg_class no_special_reg_no_fp( |
| R0, R0_H, |
| R1, R1_H, |
| R2, R2_H, |
| R3, R3_H, |
| R4, R4_H, |
| R5, R5_H, |
| R6, R6_H, |
| R7, R7_H, |
| R10, R10_H, |
| R11, R11_H, |
| R12, R12_H, // rmethod |
| R13, R13_H, |
| R14, R14_H, |
| R15, R15_H, |
| R16, R16_H, |
| R17, R17_H, |
| R18, R18_H, |
| R19, R19_H, |
| R20, R20_H, |
| R21, R21_H, |
| R22, R22_H, |
| R23, R23_H, |
| R24, R24_H, |
| R25, R25_H, |
| R26, R26_H, |
| /* R27, R27_H, */ // heapbase |
| /* R28, R28_H, */ // thread |
| /* R29, R29_H, */ // fp |
| /* R30, R30_H, */ // lr |
| /* R31, R31_H */ // sp |
| ); |
| |
| reg_class no_special_reg_with_fp( |
| R0, R0_H, |
| R1, R1_H, |
| R2, R2_H, |
| R3, R3_H, |
| R4, R4_H, |
| R5, R5_H, |
| R6, R6_H, |
| R7, R7_H, |
| R10, R10_H, |
| R11, R11_H, |
| R12, R12_H, // rmethod |
| R13, R13_H, |
| R14, R14_H, |
| R15, R15_H, |
| R16, R16_H, |
| R17, R17_H, |
| R18, R18_H, |
| R19, R19_H, |
| R20, R20_H, |
| R21, R21_H, |
| R22, R22_H, |
| R23, R23_H, |
| R24, R24_H, |
| R25, R25_H, |
| R26, R26_H, |
| /* R27, R27_H, */ // heapbase |
| /* R28, R28_H, */ // thread |
| R29, R29_H, // fp |
| /* R30, R30_H, */ // lr |
| /* R31, R31_H */ // sp |
| ); |
| |
| reg_class_dynamic no_special_reg(no_special_reg_no_fp, no_special_reg_with_fp, %{ PreserveFramePointer %}); |
| |
| // Class for 64 bit register r0 |
| reg_class r0_reg( |
| R0, R0_H |
| ); |
| |
| // Class for 64 bit register r1 |
| reg_class r1_reg( |
| R1, R1_H |
| ); |
| |
| // Class for 64 bit register r2 |
| reg_class r2_reg( |
| R2, R2_H |
| ); |
| |
| // Class for 64 bit register r3 |
| reg_class r3_reg( |
| R3, R3_H |
| ); |
| |
| // Class for 64 bit register r4 |
| reg_class r4_reg( |
| R4, R4_H |
| ); |
| |
| // Class for 64 bit register r5 |
| reg_class r5_reg( |
| R5, R5_H |
| ); |
| |
| // Class for 64 bit register r10 |
| reg_class r10_reg( |
| R10, R10_H |
| ); |
| |
| // Class for 64 bit register r11 |
| reg_class r11_reg( |
| R11, R11_H |
| ); |
| |
| // Class for method register |
| reg_class method_reg( |
| R12, R12_H |
| ); |
| |
| // Class for heapbase register |
| reg_class heapbase_reg( |
| R27, R27_H |
| ); |
| |
| // Class for thread register |
| reg_class thread_reg( |
| R28, R28_H |
| ); |
| |
| // Class for frame pointer register |
| reg_class fp_reg( |
| R29, R29_H |
| ); |
| |
| // Class for link register |
| reg_class lr_reg( |
| R30, R30_H |
| ); |
| |
| // Class for long sp register |
| reg_class sp_reg( |
| R31, R31_H |
| ); |
| |
| // Class for all pointer registers |
| reg_class ptr_reg( |
| R0, R0_H, |
| R1, R1_H, |
| R2, R2_H, |
| R3, R3_H, |
| R4, R4_H, |
| R5, R5_H, |
| R6, R6_H, |
| R7, R7_H, |
| R10, R10_H, |
| R11, R11_H, |
| R12, R12_H, |
| R13, R13_H, |
| R14, R14_H, |
| R15, R15_H, |
| R16, R16_H, |
| R17, R17_H, |
| R18, R18_H, |
| R19, R19_H, |
| R20, R20_H, |
| R21, R21_H, |
| R22, R22_H, |
| R23, R23_H, |
| R24, R24_H, |
| R25, R25_H, |
| R26, R26_H, |
| R27, R27_H, |
| R28, R28_H, |
| R29, R29_H, |
| R30, R30_H, |
| R31, R31_H |
| ); |
| |
| // Class for all non_special pointer registers |
| reg_class no_special_ptr_reg( |
| R0, R0_H, |
| R1, R1_H, |
| R2, R2_H, |
| R3, R3_H, |
| R4, R4_H, |
| R5, R5_H, |
| R6, R6_H, |
| R7, R7_H, |
| R10, R10_H, |
| R11, R11_H, |
| R12, R12_H, |
| R13, R13_H, |
| R14, R14_H, |
| R15, R15_H, |
| R16, R16_H, |
| R17, R17_H, |
| R18, R18_H, |
| R19, R19_H, |
| R20, R20_H, |
| R21, R21_H, |
| R22, R22_H, |
| R23, R23_H, |
| R24, R24_H, |
| R25, R25_H, |
| R26, R26_H, |
| /* R27, R27_H, */ // heapbase |
| /* R28, R28_H, */ // thread |
| /* R29, R29_H, */ // fp |
| /* R30, R30_H, */ // lr |
| /* R31, R31_H */ // sp |
| ); |
| |
| // Class for all float registers |
| reg_class float_reg( |
| V0, |
| V1, |
| V2, |
| V3, |
| V4, |
| V5, |
| V6, |
| V7, |
| V8, |
| V9, |
| V10, |
| V11, |
| V12, |
| V13, |
| V14, |
| V15, |
| V16, |
| V17, |
| V18, |
| V19, |
| V20, |
| V21, |
| V22, |
| V23, |
| V24, |
| V25, |
| V26, |
| V27, |
| V28, |
| V29, |
| V30, |
| V31 |
| ); |
| |
| // Double precision float registers have virtual `high halves' that |
| // are needed by the allocator. |
| // Class for all double registers |
| reg_class double_reg( |
| V0, V0_H, |
| V1, V1_H, |
| V2, V2_H, |
| V3, V3_H, |
| V4, V4_H, |
| V5, V5_H, |
| V6, V6_H, |
| V7, V7_H, |
| V8, V8_H, |
| V9, V9_H, |
| V10, V10_H, |
| V11, V11_H, |
| V12, V12_H, |
| V13, V13_H, |
| V14, V14_H, |
| V15, V15_H, |
| V16, V16_H, |
| V17, V17_H, |
| V18, V18_H, |
| V19, V19_H, |
| V20, V20_H, |
| V21, V21_H, |
| V22, V22_H, |
| V23, V23_H, |
| V24, V24_H, |
| V25, V25_H, |
| V26, V26_H, |
| V27, V27_H, |
| V28, V28_H, |
| V29, V29_H, |
| V30, V30_H, |
| V31, V31_H |
| ); |
| |
// Class for all 64 bit vector registers
| reg_class vectord_reg( |
| V0, V0_H, |
| V1, V1_H, |
| V2, V2_H, |
| V3, V3_H, |
| V4, V4_H, |
| V5, V5_H, |
| V6, V6_H, |
| V7, V7_H, |
| V8, V8_H, |
| V9, V9_H, |
| V10, V10_H, |
| V11, V11_H, |
| V12, V12_H, |
| V13, V13_H, |
| V14, V14_H, |
| V15, V15_H, |
| V16, V16_H, |
| V17, V17_H, |
| V18, V18_H, |
| V19, V19_H, |
| V20, V20_H, |
| V21, V21_H, |
| V22, V22_H, |
| V23, V23_H, |
| V24, V24_H, |
| V25, V25_H, |
| V26, V26_H, |
| V27, V27_H, |
| V28, V28_H, |
| V29, V29_H, |
| V30, V30_H, |
| V31, V31_H |
| ); |
| |
// Class for all 128 bit vector registers
| reg_class vectorx_reg( |
| V0, V0_H, V0_J, V0_K, |
| V1, V1_H, V1_J, V1_K, |
| V2, V2_H, V2_J, V2_K, |
| V3, V3_H, V3_J, V3_K, |
| V4, V4_H, V4_J, V4_K, |
| V5, V5_H, V5_J, V5_K, |
| V6, V6_H, V6_J, V6_K, |
| V7, V7_H, V7_J, V7_K, |
| V8, V8_H, V8_J, V8_K, |
| V9, V9_H, V9_J, V9_K, |
| V10, V10_H, V10_J, V10_K, |
| V11, V11_H, V11_J, V11_K, |
| V12, V12_H, V12_J, V12_K, |
| V13, V13_H, V13_J, V13_K, |
| V14, V14_H, V14_J, V14_K, |
| V15, V15_H, V15_J, V15_K, |
| V16, V16_H, V16_J, V16_K, |
| V17, V17_H, V17_J, V17_K, |
| V18, V18_H, V18_J, V18_K, |
| V19, V19_H, V19_J, V19_K, |
| V20, V20_H, V20_J, V20_K, |
| V21, V21_H, V21_J, V21_K, |
| V22, V22_H, V22_J, V22_K, |
| V23, V23_H, V23_J, V23_K, |
| V24, V24_H, V24_J, V24_K, |
| V25, V25_H, V25_J, V25_K, |
| V26, V26_H, V26_J, V26_K, |
| V27, V27_H, V27_J, V27_K, |
| V28, V28_H, V28_J, V28_K, |
| V29, V29_H, V29_J, V29_K, |
| V30, V30_H, V30_J, V30_K, |
| V31, V31_H, V31_J, V31_K |
| ); |
| |
| // Class for 128 bit register v0 |
| reg_class v0_reg( |
| V0, V0_H |
| ); |
| |
| // Class for 128 bit register v1 |
| reg_class v1_reg( |
| V1, V1_H |
| ); |
| |
| // Class for 128 bit register v2 |
| reg_class v2_reg( |
| V2, V2_H |
| ); |
| |
| // Class for 128 bit register v3 |
| reg_class v3_reg( |
| V3, V3_H |
| ); |
| |
| // Singleton class for condition codes |
| reg_class int_flags(RFLAGS); |
| |
| %} |
| |
| //----------DEFINITION BLOCK--------------------------------------------------- |
| // Define name --> value mappings to inform the ADLC of an integer valued name |
| // Current support includes integer values in the range [0, 0x7FFFFFFF] |
| // Format: |
| // int_def <name> ( <int_value>, <expression>); |
| // Generated Code in ad_<arch>.hpp |
| // #define <name> (<expression>) |
| // // value == <int_value> |
| // Generated code in ad_<arch>.cpp adlc_verification() |
| // assert( <name> == <int_value>, "Expect (<expression>) to equal <int_value>"); |
| // |
| |
// We follow the ppc-aix port in using a simple cost model which ranks
// register operations as cheap, memory ops as more expensive and
// branches as most expensive. The first two have a low as well as a
// normal cost. Huge cost appears to be a way of saying don't do
// something.
| |
| definitions %{ |
| // The default cost (of a register move instruction). |
| int_def INSN_COST ( 100, 100); |
| int_def BRANCH_COST ( 200, 2 * INSN_COST); |
| int_def CALL_COST ( 200, 2 * INSN_COST); |
| int_def VOLATILE_REF_COST ( 1000, 10 * INSN_COST); |
| %} |
| |
| |
| //----------SOURCE BLOCK------------------------------------------------------- |
| // This is a block of C++ code which provides values, functions, and |
| // definitions necessary in the rest of the architecture description |
| |
| source_hpp %{ |
| |
| #include "gc/shared/cardTableModRefBS.hpp" |
| #include "opto/addnode.hpp" |
| |
| class CallStubImpl { |
| |
| //-------------------------------------------------------------- |
| //---< Used for optimization in Compile::shorten_branches >--- |
| //-------------------------------------------------------------- |
| |
| public: |
| // Size of call trampoline stub. |
| static uint size_call_trampoline() { |
| return 0; // no call trampolines on this platform |
| } |
| |
| // number of relocations needed by a call trampoline stub |
| static uint reloc_call_trampoline() { |
| return 0; // no call trampolines on this platform |
| } |
| }; |
| |
| class HandlerImpl { |
| |
| public: |
| |
| static int emit_exception_handler(CodeBuffer &cbuf); |
| static int emit_deopt_handler(CodeBuffer& cbuf); |
| |
| static uint size_exception_handler() { |
| return MacroAssembler::far_branch_size(); |
| } |
| |
| static uint size_deopt_handler() { |
| // count one adr and one far branch instruction |
| return 4 * NativeInstruction::instruction_size; |
| } |
| }; |
| |
| // graph traversal helpers |
| |
| MemBarNode *parent_membar(const Node *n); |
| MemBarNode *child_membar(const MemBarNode *n); |
| bool leading_membar(const MemBarNode *barrier); |
| |
| bool is_card_mark_membar(const MemBarNode *barrier); |
| bool is_CAS(int opcode); |
| |
| MemBarNode *leading_to_normal(MemBarNode *leading); |
| MemBarNode *normal_to_leading(const MemBarNode *barrier); |
| MemBarNode *card_mark_to_trailing(const MemBarNode *barrier); |
| MemBarNode *trailing_to_card_mark(const MemBarNode *trailing); |
| MemBarNode *trailing_to_leading(const MemBarNode *trailing); |
| |
| // predicates controlling emit of ldr<x>/ldar<x> and associated dmb |
| |
| bool unnecessary_acquire(const Node *barrier); |
| bool needs_acquiring_load(const Node *load); |
| |
| // predicates controlling emit of str<x>/stlr<x> and associated dmbs |
| |
| bool unnecessary_release(const Node *barrier); |
| bool unnecessary_volatile(const Node *barrier); |
| bool needs_releasing_store(const Node *store); |
| |
| // predicate controlling translation of CompareAndSwapX |
| bool needs_acquiring_load_exclusive(const Node *load); |
| |
| // predicate controlling translation of StoreCM |
| bool unnecessary_storestore(const Node *storecm); |
| |
| // predicate controlling addressing modes |
| bool size_fits_all_mem_uses(AddPNode* addp, int shift); |
| %} |
| |
| source %{ |
| |
// Optimization of volatile gets and puts
| // ------------------------------------- |
| // |
| // AArch64 has ldar<x> and stlr<x> instructions which we can safely |
| // use to implement volatile reads and writes. For a volatile read |
| // we simply need |
| // |
| // ldar<x> |
| // |
| // and for a volatile write we need |
| // |
| // stlr<x> |
| // |
| // Alternatively, we can implement them by pairing a normal |
| // load/store with a memory barrier. For a volatile read we need |
| // |
| // ldr<x> |
| // dmb ishld |
| // |
| // for a volatile write |
| // |
| // dmb ish |
| // str<x> |
| // dmb ish |
| // |
// We can also use ldaxr and stlxr to implement compare and swap (CAS)
// sequences. These are normally translated to an instruction
| // sequence like the following |
| // |
| // dmb ish |
| // retry: |
| // ldxr<x> rval raddr |
| // cmp rval rold |
| // b.ne done |
//   stlxr<x> rval, rnew, raddr
| // cbnz rval retry |
| // done: |
| // cset r0, eq |
| // dmb ishld |
| // |
| // Note that the exclusive store is already using an stlxr |
| // instruction. That is required to ensure visibility to other |
| // threads of the exclusive write (assuming it succeeds) before that |
| // of any subsequent writes. |
| // |
| // The following instruction sequence is an improvement on the above |
| // |
| // retry: |
| // ldaxr<x> rval raddr |
| // cmp rval rold |
| // b.ne done |
//   stlxr<x> rval, rnew, raddr
| // cbnz rval retry |
| // done: |
| // cset r0, eq |
| // |
| // We don't need the leading dmb ish since the stlxr guarantees |
| // visibility of prior writes in the case that the swap is |
| // successful. Crucially we don't have to worry about the case where |
| // the swap is not successful since no valid program should be |
| // relying on visibility of prior changes by the attempting thread |
| // in the case where the CAS fails. |
| // |
| // Similarly, we don't need the trailing dmb ishld if we substitute |
| // an ldaxr instruction since that will provide all the guarantees we |
| // require regarding observation of changes made by other threads |
| // before any change to the CAS address observed by the load. |
| // |
| // In order to generate the desired instruction sequence we need to |
| // be able to identify specific 'signature' ideal graph node |
// sequences which i) occur as a translation of volatile reads, writes
// or CAS operations and ii) do not occur through any other
// translation or graph transformation. We can then provide
// alternative adlc matching rules which translate these node
| // sequences to the desired machine code sequences. Selection of the |
| // alternative rules can be implemented by predicates which identify |
| // the relevant node sequences. |
| // |
| // The ideal graph generator translates a volatile read to the node |
| // sequence |
| // |
| // LoadX[mo_acquire] |
| // MemBarAcquire |
| // |
| // As a special case when using the compressed oops optimization we |
| // may also see this variant |
| // |
| // LoadN[mo_acquire] |
| // DecodeN |
| // MemBarAcquire |
| // |
| // A volatile write is translated to the node sequence |
| // |
| // MemBarRelease |
| // StoreX[mo_release] {CardMark}-optional |
| // MemBarVolatile |
| // |
| // n.b. the above node patterns are generated with a strict |
| // 'signature' configuration of input and output dependencies (see |
| // the predicates below for exact details). The card mark may be as |
| // simple as a few extra nodes or, in a few GC configurations, may |
| // include more complex control flow between the leading and |
| // trailing memory barriers. However, whatever the card mark |
| // configuration these signatures are unique to translated volatile |
| // reads/stores -- they will not appear as a result of any other |
| // bytecode translation or inlining nor as a consequence of |
| // optimizing transforms. |
| // |
| // We also want to catch inlined unsafe volatile gets and puts and |
| // be able to implement them using either ldar<x>/stlr<x> or some |
// combination of ldr<x>/str<x> and dmb instructions.
| // |
// Inlined unsafe volatile puts manifest as a minor variant of the
| // normal volatile put node sequence containing an extra cpuorder |
| // membar |
| // |
| // MemBarRelease |
| // MemBarCPUOrder |
| // StoreX[mo_release] {CardMark}-optional |
| // MemBarVolatile |
| // |
| // n.b. as an aside, the cpuorder membar is not itself subject to |
| // matching and translation by adlc rules. However, the rule |
| // predicates need to detect its presence in order to correctly |
| // select the desired adlc rules. |
| // |
| // Inlined unsafe volatile gets manifest as a somewhat different |
| // node sequence to a normal volatile get |
| // |
| // MemBarCPUOrder |
| // || \\ |
| // MemBarAcquire LoadX[mo_acquire] |
| // || |
| // MemBarCPUOrder |
| // |
| // In this case the acquire membar does not directly depend on the |
| // load. However, we can be sure that the load is generated from an |
| // inlined unsafe volatile get if we see it dependent on this unique |
| // sequence of membar nodes. Similarly, given an acquire membar we |
| // can know that it was added because of an inlined unsafe volatile |
| // get if it is fed and feeds a cpuorder membar and if its feed |
| // membar also feeds an acquiring load. |
| // |
| // Finally an inlined (Unsafe) CAS operation is translated to the |
| // following ideal graph |
| // |
| // MemBarRelease |
| // MemBarCPUOrder |
| // CompareAndSwapX {CardMark}-optional |
| // MemBarCPUOrder |
| // MemBarAcquire |
| // |
| // So, where we can identify these volatile read and write |
| // signatures we can choose to plant either of the above two code |
| // sequences. For a volatile read we can simply plant a normal |
| // ldr<x> and translate the MemBarAcquire to a dmb. However, we can |
| // also choose to inhibit translation of the MemBarAcquire and |
| // inhibit planting of the ldr<x>, instead planting an ldar<x>. |
| // |
| // When we recognise a volatile store signature we can choose to |
// plant a dmb ish as a translation for the MemBarRelease, a
| // normal str<x> and then a dmb ish for the MemBarVolatile. |
| // Alternatively, we can inhibit translation of the MemBarRelease |
| // and MemBarVolatile and instead plant a simple stlr<x> |
| // instruction. |
| // |
// When we recognise a CAS signature we can choose to plant a dmb
| // ish as a translation for the MemBarRelease, the conventional |
| // macro-instruction sequence for the CompareAndSwap node (which |
| // uses ldxr<x>) and then a dmb ishld for the MemBarAcquire. |
| // Alternatively, we can elide generation of the dmb instructions |
| // and plant the alternative CompareAndSwap macro-instruction |
| // sequence (which uses ldaxr<x>). |
| // |
| // Of course, the above only applies when we see these signature |
| // configurations. We still want to plant dmb instructions in any |
| // other cases where we may see a MemBarAcquire, MemBarRelease or |
| // MemBarVolatile. For example, at the end of a constructor which |
| // writes final/volatile fields we will see a MemBarRelease |
| // instruction and this needs a 'dmb ish' lest we risk the |
| // constructed object being visible without making the |
| // final/volatile field writes visible. |
| // |
| // n.b. the translation rules below which rely on detection of the |
| // volatile signatures and insert ldar<x> or stlr<x> are failsafe. |
| // If we see anything other than the signature configurations we |
| // always just translate the loads and stores to ldr<x> and str<x> |
| // and translate acquire, release and volatile membars to the |
| // relevant dmb instructions. |
| // |
| |
| // graph traversal helpers used for volatile put/get and CAS |
| // optimization |
| |
| // 1) general purpose helpers |
| |
| // if node n is linked to a parent MemBarNode by an intervening |
| // Control and Memory ProjNode return the MemBarNode otherwise return |
| // NULL. |
| // |
| // n may only be a Load or a MemBar. |
| |
| MemBarNode *parent_membar(const Node *n) |
| { |
| Node *ctl = NULL; |
| Node *mem = NULL; |
| Node *membar = NULL; |
| |
| if (n->is_Load()) { |
| ctl = n->lookup(LoadNode::Control); |
| mem = n->lookup(LoadNode::Memory); |
| } else if (n->is_MemBar()) { |
| ctl = n->lookup(TypeFunc::Control); |
| mem = n->lookup(TypeFunc::Memory); |
| } else { |
| return NULL; |
| } |
| |
| if (!ctl || !mem || !ctl->is_Proj() || !mem->is_Proj()) { |
| return NULL; |
| } |
| |
| membar = ctl->lookup(0); |
| |
| if (!membar || !membar->is_MemBar()) { |
| return NULL; |
| } |
| |
| if (mem->lookup(0) != membar) { |
| return NULL; |
| } |
| |
| return membar->as_MemBar(); |
| } |
| |
| // if n is linked to a child MemBarNode by intervening Control and |
| // Memory ProjNodes return the MemBarNode otherwise return NULL. |
| |
| MemBarNode *child_membar(const MemBarNode *n) |
| { |
| ProjNode *ctl = n->proj_out(TypeFunc::Control); |
| ProjNode *mem = n->proj_out(TypeFunc::Memory); |
| |
| // MemBar needs to have both a Ctl and Mem projection |
| if (! ctl || ! mem) |
| return NULL; |
| |
| MemBarNode *child = NULL; |
| Node *x; |
| |
| for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) { |
| x = ctl->fast_out(i); |
| // if we see a membar we keep hold of it. we may also see a new |
| // arena copy of the original but it will appear later |
| if (x->is_MemBar()) { |
| child = x->as_MemBar(); |
| break; |
| } |
| } |
| |
| if (child == NULL) { |
| return NULL; |
| } |
| |
| for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { |
| x = mem->fast_out(i); |
| // if we see a membar we keep hold of it. we may also see a new |
| // arena copy of the original but it will appear later |
| if (x == child) { |
| return child; |
| } |
| } |
| return NULL; |
| } |
| |
// helper predicate used to filter candidates for a leading memory
| // barrier |
| // |
| // returns true if barrier is a MemBarRelease or a MemBarCPUOrder |
| // whose Ctl and Mem feeds come from a MemBarRelease otherwise false |
| |
| bool leading_membar(const MemBarNode *barrier) |
| { |
| int opcode = barrier->Opcode(); |
| // if this is a release membar we are ok |
| if (opcode == Op_MemBarRelease) { |
| return true; |
| } |
// if it's a cpuorder membar . . .
| if (opcode != Op_MemBarCPUOrder) { |
| return false; |
| } |
| // then the parent has to be a release membar |
| MemBarNode *parent = parent_membar(barrier); |
| if (!parent) { |
| return false; |
| } |
| opcode = parent->Opcode(); |
| return opcode == Op_MemBarRelease; |
| } |
| |
| // 2) card mark detection helper |
| |
| // helper predicate which can be used to detect a volatile membar |
| // introduced as part of a conditional card mark sequence either by |
| // G1 or by CMS when UseCondCardMark is true. |
| // |
| // membar can be definitively determined to be part of a card mark |
| // sequence if and only if all the following hold |
| // |
| // i) it is a MemBarVolatile |
| // |
| // ii) either UseG1GC or (UseConcMarkSweepGC && UseCondCardMark) is |
| // true |
| // |
| // iii) the node's Mem projection feeds a StoreCM node. |
| |
| bool is_card_mark_membar(const MemBarNode *barrier) |
| { |
| if (!UseG1GC && !(UseConcMarkSweepGC && UseCondCardMark)) { |
| return false; |
| } |
| |
| if (barrier->Opcode() != Op_MemBarVolatile) { |
| return false; |
| } |
| |
| ProjNode *mem = barrier->proj_out(TypeFunc::Memory); |
| |
| for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax ; i++) { |
| Node *y = mem->fast_out(i); |
| if (y->Opcode() == Op_StoreCM) { |
| return true; |
| } |
| } |
| |
| return false; |
| } |
| |
| |
| // 3) helper predicates to traverse volatile put or CAS graphs which |
| // may contain GC barrier subgraphs |
| |
| // Preamble |
| // -------- |
| // |
// For volatile writes we can omit generating barriers and employ a
// releasing store when we see a node sequence with a
| // leading MemBarRelease and a trailing MemBarVolatile as follows |
| // |
| // MemBarRelease |
| // { || } -- optional |
| // {MemBarCPUOrder} |
| // || \\ |
| // || StoreX[mo_release] |
| // | \ / |
| // | MergeMem |
| // | / |
| // MemBarVolatile |
| // |
| // where |
| // || and \\ represent Ctl and Mem feeds via Proj nodes |
| // | \ and / indicate further routing of the Ctl and Mem feeds |
| // |
// This is the graph we see for non-object stores. However, for a
| // volatile Object store (StoreN/P) we may see other nodes below the |
| // leading membar because of the need for a GC pre- or post-write |
| // barrier. |
| // |
// With most GC configurations we will see this simple variant which
// includes a post-write card mark barrier.
| // |
| // MemBarRelease______________________________ |
| // || \\ Ctl \ \\ |
| // || StoreN/P[mo_release] CastP2X StoreB/CM |
| // | \ / . . . / |
| // | MergeMem |
| // | / |
| // || / |
| // MemBarVolatile |
| // |
| // i.e. the leading membar feeds Ctl to a CastP2X (which converts |
| // the object address to an int used to compute the card offset) and |
| // Ctl+Mem to a StoreB node (which does the actual card mark). |
| // |
| // n.b. a StoreCM node will only appear in this configuration when |
| // using CMS. StoreCM differs from a normal card mark write (StoreB) |
| // because it implies a requirement to order visibility of the card |
| // mark (StoreCM) relative to the object put (StoreP/N) using a |
| // StoreStore memory barrier (arguably this ought to be represented |
| // explicitly in the ideal graph but that is not how it works). This |
| // ordering is required for both non-volatile and volatile |
| // puts. Normally that means we need to translate a StoreCM using |
| // the sequence |
| // |
| // dmb ishst |
| // stlrb |
| // |
| // However, in the case of a volatile put if we can recognise this |
| // configuration and plant an stlr for the object write then we can |
| // omit the dmb and just plant an strb since visibility of the stlr |
| // is ordered before visibility of subsequent stores. StoreCM nodes |
| // also arise when using G1 or using CMS with conditional card |
| // marking. In these cases (as we shall see) we don't need to insert |
| // the dmb when translating StoreCM because there is already an |
| // intervening StoreLoad barrier between it and the StoreP/N. |
| // |
| // It is also possible to perform the card mark conditionally on it |
| // currently being unmarked in which case the volatile put graph |
| // will look slightly different |
| // |
| // MemBarRelease____________________________________________ |
| // || \\ Ctl \ Ctl \ \\ Mem \ |
| // || StoreN/P[mo_release] CastP2X If LoadB | |
| // | \ / \ | |
| // | MergeMem . . . StoreB |
| // | / / |
| // || / |
| // MemBarVolatile |
| // |
| // It is worth noting at this stage that both the above |
| // configurations can be uniquely identified by checking that the |
| // memory flow includes the following subgraph: |
| // |
| // MemBarRelease |
| // {MemBarCPUOrder} |
| // | \ . . . |
| // | StoreX[mo_release] . . . |
| // | / |
| // MergeMem |
| // | |
| // MemBarVolatile |
| // |
| // This is referred to as a *normal* subgraph. It can easily be |
| // detected starting from any candidate MemBarRelease, |
| // StoreX[mo_release] or MemBarVolatile. |
| // |
| // A simple variation on this normal case occurs for an unsafe CAS |
| // operation. The basic graph for a non-object CAS is |
| // |
| // MemBarRelease |
| // || |
| // MemBarCPUOrder |
| // || \\ . . . |
| // || CompareAndSwapX |
| // || | |
| // || SCMemProj |
| // | \ / |
| // | MergeMem |
| // | / |
| // MemBarCPUOrder |
| // || |
| // MemBarAcquire |
| // |
| // The same basic variations on this arrangement (mutatis mutandis) |
// occur when a card mark is introduced, i.e. we see the same basic
// shape but the StoreP/N is replaced with CompareAndSwapP/N and the
| // tail of the graph is a pair comprising a MemBarCPUOrder + |
| // MemBarAcquire. |
| // |
| // So, in the case of a CAS the normal graph has the variant form |
| // |
| // MemBarRelease |
| // MemBarCPUOrder |
| // | \ . . . |
| // | CompareAndSwapX . . . |
| // | | |
| // | SCMemProj |
| // | / . . . |
| // MergeMem |
| // | |
| // MemBarCPUOrder |
| // MemBarAcquire |
| // |
| // This graph can also easily be detected starting from any |
| // candidate MemBarRelease, CompareAndSwapX or MemBarAcquire. |
| // |
// The code below uses two helper predicates, leading_to_normal and
// normal_to_leading, to identify these normal graphs, one validating
| // the layout starting from the top membar and searching down and |
| // the other validating the layout starting from the lower membar |
| // and searching up. |
| // |
| // There are two special case GC configurations when a normal graph |
| // may not be generated: when using G1 (which always employs a |
| // conditional card mark); and when using CMS with conditional card |
| // marking configured. These GCs are both concurrent rather than |
// stop-the-world GCs. So they introduce extra Ctl+Mem flow into the
// graph between the leading and trailing membar nodes, in
// particular enforcing stronger memory serialisation between the
| // object put and the corresponding conditional card mark. CMS |
| // employs a post-write GC barrier while G1 employs both a pre- and |
| // post-write GC barrier. Of course the extra nodes may be absent -- |
| // they are only inserted for object puts. This significantly |
| // complicates the task of identifying whether a MemBarRelease, |
| // StoreX[mo_release] or MemBarVolatile forms part of a volatile put |
| // when using these GC configurations (see below). It adds similar |
| // complexity to the task of identifying whether a MemBarRelease, |
| // CompareAndSwapX or MemBarAcquire forms part of a CAS. |
| // |
| // In both cases the post-write subtree includes an auxiliary |
| // MemBarVolatile (StoreLoad barrier) separating the object put and |
| // the read of the corresponding card. This poses two additional |
| // problems. |
| // |
| // Firstly, a card mark MemBarVolatile needs to be distinguished |
| // from a normal trailing MemBarVolatile. Resolving this first |
| // problem is straightforward: a card mark MemBarVolatile always |
| // projects a Mem feed to a StoreCM node and that is a unique marker |
| // |
| // MemBarVolatile (card mark) |
| // C | \ . . . |
| // | StoreCM . . . |
| // . . . |
| // |
| // The second problem is how the code generator is to translate the |
// card mark barrier. It always needs to be translated to a "dmb
| // ish" instruction whether or not it occurs as part of a volatile |
| // put. A StoreLoad barrier is needed after the object put to ensure |
| // i) visibility to GC threads of the object put and ii) visibility |
| // to the mutator thread of any card clearing write by a GC |
| // thread. Clearly a normal store (str) will not guarantee this |
| // ordering but neither will a releasing store (stlr). The latter |
| // guarantees that the object put is visible but does not guarantee |
| // that writes by other threads have also been observed. |
| // |
| // So, returning to the task of translating the object put and the |
// leading/trailing membar nodes: what do the non-normal node graphs
// look like for these 2 special cases? And how can we determine the
| // status of a MemBarRelease, StoreX[mo_release] or MemBarVolatile |
| // in both normal and non-normal cases? |
| // |
| // A CMS GC post-barrier wraps its card write (StoreCM) inside an If |
// which selects conditional execution based on the value loaded
| // (LoadB) from the card. Ctl and Mem are fed to the If via an |
| // intervening StoreLoad barrier (MemBarVolatile). |
| // |
| // So, with CMS we may see a node graph for a volatile object store |
| // which looks like this |
| // |
| // MemBarRelease |
| // MemBarCPUOrder_(leading)__________________ |
| // C | M \ \\ C \ |
| // | \ StoreN/P[mo_release] CastP2X |
| // | Bot \ / |
| // | MergeMem |
| // | / |
| // MemBarVolatile (card mark) |
| // C | || M | |
| // | LoadB | |
| // | | | |
| // | Cmp |\ |
| // | / | \ |
| // If | \ |
| // | \ | \ |
| // IfFalse IfTrue | \ |
| // \ / \ | \ |
| // \ / StoreCM | |
| // \ / | | |
| // Region . . . | |
| // | \ / |
| // | . . . \ / Bot |
| // | MergeMem |
| // | | |
| // MemBarVolatile (trailing) |
| // |
| // The first MergeMem merges the AliasIdxBot Mem slice from the |
| // leading membar and the oopptr Mem slice from the Store into the |
| // card mark membar. The trailing MergeMem merges the AliasIdxBot |
| // Mem slice from the card mark membar and the AliasIdxRaw slice |
| // from the StoreCM into the trailing membar (n.b. the latter |
| // proceeds via a Phi associated with the If region). |
| // |
| // The graph for a CAS varies slightly, the obvious difference being |
| // that the StoreN/P node is replaced by a CompareAndSwapP/N node |
| // and the trailing MemBarVolatile by a MemBarCPUOrder + |
| // MemBarAcquire pair. The other important difference is that the |
| // CompareAndSwap node's SCMemProj is not merged into the card mark |
| // membar - it still feeds the trailing MergeMem. This also means |
| // that the card mark membar receives its Mem feed directly from the |
| // leading membar rather than via a MergeMem. |
| // |
| // MemBarRelease |
| // MemBarCPUOrder__(leading)_________________________ |
| // || \\ C \ |
| // MemBarVolatile (card mark) CompareAndSwapN/P CastP2X |
| // C | || M | | |
| // | LoadB | ______/| |
| // | | | / | |
| // | Cmp | / SCMemProj |
| // | / | / | |
| // If | / / |
| // | \ | / / |
| // IfFalse IfTrue | / / |
| // \ / \ |/ prec / |
| // \ / StoreCM / |
| // \ / | / |
| // Region . . . / |
| // | \ / |
| // | . . . \ / Bot |
| // | MergeMem |
| // | | |
| // MemBarCPUOrder |
| // MemBarAcquire (trailing) |
| // |
| // This has a slightly different memory subgraph to the one seen |
| // previously but the core of it is the same as for the CAS normal |
// subgraph
| // |
| // MemBarRelease |
| // MemBarCPUOrder____ |
| // || \ . . . |
| // MemBarVolatile CompareAndSwapX . . . |
| // | \ | |
| // . . . SCMemProj |
| // | / . . . |
| // MergeMem |
| // | |
| // MemBarCPUOrder |
| // MemBarAcquire |
| // |
| // |
| // G1 is quite a lot more complicated. The nodes inserted on behalf |
| // of G1 may comprise: a pre-write graph which adds the old value to |
| // the SATB queue; the releasing store itself; and, finally, a |
| // post-write graph which performs a card mark. |
| // |
| // The pre-write graph may be omitted, but only when the put is |
| // writing to a newly allocated (young gen) object and then only if |
| // there is a direct memory chain to the Initialize node for the |
| // object allocation. This will not happen for a volatile put since |
| // any memory chain passes through the leading membar. |
| // |
| // The pre-write graph includes a series of 3 If tests. The outermost |
| // If tests whether SATB is enabled (no else case). The next If tests |
| // whether the old value is non-NULL (no else case). The third tests |
| // whether the SATB queue index is > 0, if so updating the queue. The |
| // else case for this third If calls out to the runtime to allocate a |
| // new queue buffer. |
| // |
| // So with G1 the pre-write and releasing store subgraph looks like |
| // this (the nested Ifs are omitted). |
| // |
| // MemBarRelease (leading)____________ |
| // C | || M \ M \ M \ M \ . . . |
| // | LoadB \ LoadL LoadN \ |
| // | / \ \ |
| // If |\ \ |
| // | \ | \ \ |
| // IfFalse IfTrue | \ \ |
| // | | | \ | |
| // | If | /\ | |
| // | | \ | |
| // | \ | |
| // | . . . \ | |
| // | / | / | | |
| // Region Phi[M] | | |
| // | \ | | | |
| // | \_____ | ___ | | |
| // C | C \ | C \ M | | |
| // | CastP2X | StoreN/P[mo_release] | |
| // | | | | |
| // C | M | M | M | |
| // \ | | / |
| // . . . |
| // (post write subtree elided) |
| // . . . |
| // C \ M / |
| // MemBarVolatile (trailing) |
| // |
| // n.b. the LoadB in this subgraph is not the card read -- it's a |
| // read of the SATB queue active flag. |
| // |
| // Once again the CAS graph is a minor variant on the above with the |
| // expected substitutions of CompareAndSwapX for StoreN/P and |
| // MemBarCPUOrder + MemBarAcquire for the trailing MemBarVolatile. |
| // |
| // The G1 post-write subtree is also optional, this time when the |
| // new value being written is either null or can be identified as a |
| // newly allocated (young gen) object with no intervening control |
| // flow. The latter cannot happen but the former may, in which case |
| // the card mark membar is omitted and the memory feeds from the |
| // leading membar and the StoreN/P are merged directly into the |
| // trailing membar as per the normal subgraph. So, the only special |
| // case which arises is when the post-write subgraph is generated. |
| // |
| // The kernel of the post-write G1 subgraph is the card mark itself |
| // which includes a card mark memory barrier (MemBarVolatile), a |
| // card test (LoadB), and a conditional update (If feeding a |
| // StoreCM). These nodes are surrounded by a series of nested Ifs |
| // which try to avoid doing the card mark. The top level If skips if |
| // the object reference does not cross regions (i.e. it tests if |
| // (adr ^ val) >> log2(regsize) != 0) -- intra-region references |
| // need not be recorded. The next If, which skips on a NULL value, |
| // may be absent (it is not generated if the type of value is >= |
| // OopPtr::NotNull). The 3rd If skips writes to young regions (by |
| // checking if card_val != young). n.b. although this test requires |
| // a pre-read of the card it can safely be done before the StoreLoad |
| // barrier. However that does not bypass the need to reread the card |
| // after the barrier. |
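| // |
| // As a rough sketch of the post-write logic just described -- again |
| // illustrative pseudocode only, with hypothetical names: |
| // |
| //   if (((adr ^ val) >> log2(regsize)) != 0) { // cross-region ref? |
| //     if (val != NULL) {                       // may be elided |
| //       card = card_base + (adr >> card_shift); |
| //       if (*card != young_val) {              // pre-read of card |
| //         StoreLoad;                           // card mark membar |
| //         if (*card != dirty_val) {            // re-read (LoadB) |
| //           *card = dirty_val;                 // the StoreCM |
| //           . . .                              // enqueue the card |
| //         } |
| //       } |
| //     } |
| //   } |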
| // |
| // (pre-write subtree elided) |
| // . . . . . . . . . . . . |
| // C | M | M | M | |
| // Region Phi[M] StoreN | |
| // | / \ | | |
| // / \_______ / \ | | |
| // C / C \ . . . \ | | |
| // If CastP2X . . . | | | |
| // / \ | | | |
| // / \ | | | |
| // IfFalse IfTrue | | | |
| // | | | | /| |
| // | If | | / | |
| // | / \ | | / | |
| // | / \ \ | / | |
| // | IfFalse IfTrue MergeMem | |
| // | . . . / \ / | |
| // | / \ / | |
| // | IfFalse IfTrue / | |
| // | . . . | / | |
| // | If / | |
| // | / \ / | |
| // | / \ / | |
| // | IfFalse IfTrue / | |
| // | . . . | / | |
| // | \ / | |
| // | \ / | |
| // | MemBarVolatile__(card mark) | |
| // | || C | M \ M \ | |
| // | LoadB If | | | |
| // | / \ | | | |
| // | . . . | | | |
| // | \ | | / |
| // | StoreCM | / |
| // | . . . | / |
| // | _________/ / |
| // | / _____________/ |
| // | . . . . . . | / / |
| // | | | / _________/ |
| // | | Phi[M] / / |
| // | | | / / |
| // | | | / / |
| // | Region . . . Phi[M] _____/ |
| // | / | / |
| // | | / |
| // | . . . . . . | / |
| // | / | / |
| // Region | | Phi[M] |
| // | | | / Bot |
| // \ MergeMem |
| // \ / |
| // MemBarVolatile |
| // |
| // As with CMS the initial MergeMem merges the AliasIdxBot Mem slice |
| // from the leading membar and the oopptr Mem slice from the Store |
| // into the card mark membar i.e. the memory flow to the card mark |
| // membar still looks like a normal graph. |
| // |
| // The trailing MergeMem merges an AliasIdxBot Mem slice with other |
| // Mem slices (from the StoreCM and other card mark queue stores). |
| // However in this case the AliasIdxBot Mem slice does not come |
| // direct from the card mark membar. It is merged through a series |
| // of Phi nodes. These are needed to merge the AliasIdxBot Mem flow |
| // from the leading membar with the Mem feed from the card mark |
| // membar. Each Phi corresponds to one of the Ifs which may skip |
| // around the card mark membar. So when the If implementing the NULL |
| // value check has been elided the total number of Phis is 2 |
| // otherwise it is 3. |
| // |
| // The CAS graph when using G1GC also includes a pre-write subgraph |
| // and an optional post-write subgraph. The same variations are |
| // introduced as for CMS with conditional card marking i.e. the |
| // StoreP/N is swapped for a CompareAndSwapP/N, the trailing |
| // MemBarVolatile for a MemBarCPUOrder + MemBarAcquire pair and the |
| // Mem feed from the CompareAndSwapP/N includes a precedence |
| // dependency feed to the StoreCM and a feed via an SCMemProj to the |
| // trailing membar. So, as before the configuration includes the |
| // normal CAS graph as a subgraph of the memory flow. |
| // |
| // So, the upshot is that in all cases the volatile put graph will |
| // include a *normal* memory subgraph between the leading membar and |
| // its child membar, either a volatile put graph (including a |
| // releasing StoreX) or a CAS graph (including a CompareAndSwapX). |
| // When that child is not a card mark membar then it marks the end |
| // of the volatile put or CAS subgraph. If the child is a card mark |
| // membar then the normal subgraph will form part of a volatile put |
| // subgraph if and only if the child feeds an AliasIdxBot Mem feed |
| // to a trailing barrier via a MergeMem. That feed is either direct |
| // (for CMS) or via 2 or 3 Phi nodes merging the leading barrier |
| // memory flow (for G1). |
| // |
| // The predicates controlling generation of instructions for store |
| // and barrier nodes employ a few simple helper functions (described |
| // below) which identify the presence or absence of all these |
| // subgraph configurations and provide a means of traversing from |
| // one node in the subgraph to another. |
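| // |
| // for example, a predicate which starts from a leading membar can |
| // chain these helpers roughly as follows (an illustrative sketch of |
| // the flow in unnecessary_release below, not additional code): |
| // |
| //   MemBarNode *child = leading_to_normal(leading); |
| //   if (child == NULL) return false;     // no normal subgraph |
| //   if (!is_card_mark_membar(child)) { |
| //     return true;                       // child is the trailing membar |
| //   } |
| //   // otherwise the card mark must feed a trailing membar |
| //   return card_mark_to_trailing(child) != NULL; |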
| |
| // is_CAS(int opcode) |
| // |
| // return true if opcode is one of the possible CompareAndSwapX |
| // values otherwise false. |
| |
| bool is_CAS(int opcode) |
| { |
| switch(opcode) { |
| // We handle these |
| case Op_CompareAndSwapI: |
| case Op_CompareAndSwapL: |
| case Op_CompareAndSwapP: |
| case Op_CompareAndSwapN: |
| // case Op_CompareAndSwapB: |
| // case Op_CompareAndSwapS: |
| return true; |
| // These are TBD |
| case Op_WeakCompareAndSwapB: |
| case Op_WeakCompareAndSwapS: |
| case Op_WeakCompareAndSwapI: |
| case Op_WeakCompareAndSwapL: |
| case Op_WeakCompareAndSwapP: |
| case Op_WeakCompareAndSwapN: |
| case Op_CompareAndExchangeB: |
| case Op_CompareAndExchangeS: |
| case Op_CompareAndExchangeI: |
| case Op_CompareAndExchangeL: |
| case Op_CompareAndExchangeP: |
| case Op_CompareAndExchangeN: |
| return false; |
| default: |
| return false; |
| } |
| } |
| |
| |
| // leading_to_normal |
| // |
| // graph traversal helper which detects the normal case Mem feed from |
| // a release membar (or, optionally, its cpuorder child) to a |
| // dependent volatile membar i.e. it ensures that one or other of |
| // the following Mem flow subgraphs is present. |
| // |
| // MemBarRelease |
| // MemBarCPUOrder {leading} |
| // | \ . . . |
| // | StoreN/P[mo_release] . . . |
| // | / |
| // MergeMem |
| // | |
| // MemBarVolatile {trailing or card mark} |
| // |
| // MemBarRelease |
| // MemBarCPUOrder {leading} |
| // | \ . . . |
| // | CompareAndSwapX . . . |
| // | |
| // . . . SCMemProj |
| // \ | |
| // | MergeMem |
| // | / |
| // MemBarCPUOrder |
| // MemBarAcquire {trailing} |
| // |
| // if the correct configuration is present returns the trailing |
| // membar otherwise NULL. |
| // |
| // the input membar is expected to be either a cpuorder membar or a |
| // release membar. in the latter case it should not have a cpuorder |
| // membar child. |
| // |
| // the returned value may be a card mark or trailing membar |
| // |
| |
| MemBarNode *leading_to_normal(MemBarNode *leading) |
| { |
| assert((leading->Opcode() == Op_MemBarRelease || |
| leading->Opcode() == Op_MemBarCPUOrder), |
| "expecting a volatile or cpuroder membar!"); |
| |
| // check the mem flow |
| ProjNode *mem = leading->proj_out(TypeFunc::Memory); |
| |
| if (!mem) { |
| return NULL; |
| } |
| |
| Node *x = NULL; |
| StoreNode * st = NULL; |
| LoadStoreNode *cas = NULL; |
| MergeMemNode *mm = NULL; |
| |
| for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { |
| x = mem->fast_out(i); |
| if (x->is_MergeMem()) { |
| // two merge mems is one too many |
| if (mm != NULL) { |
| return NULL; |
| } |
| mm = x->as_MergeMem(); |
| } else if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) { |
| // two releasing stores/CAS nodes is one too many |
| if (st != NULL || cas != NULL) { |
| return NULL; |
| } |
| st = x->as_Store(); |
| } else if (is_CAS(x->Opcode())) { |
| if (st != NULL || cas != NULL) { |
| return NULL; |
| } |
| cas = x->as_LoadStore(); |
| } |
| } |
| |
| // must have a store or a cas |
| if (!st && !cas) { |
| return NULL; |
| } |
| |
| // must have a merge if we also have st |
| if (st && !mm) { |
| return NULL; |
| } |
| |
| Node *y = NULL; |
| if (cas) { |
| // look for an SCMemProj |
| for (DUIterator_Fast imax, i = cas->fast_outs(imax); i < imax; i++) { |
| x = cas->fast_out(i); |
| if (x->is_Proj()) { |
| y = x; |
| break; |
| } |
| } |
| if (y == NULL) { |
| return NULL; |
| } |
| // the proj must feed a MergeMem |
| for (DUIterator_Fast imax, i = y->fast_outs(imax); i < imax; i++) { |
| x = y->fast_out(i); |
| if (x->is_MergeMem()) { |
| mm = x->as_MergeMem(); |
| break; |
| } |
| } |
| if (mm == NULL) { |
| return NULL; |
| } |
| } else { |
| // ensure the store feeds the existing mergemem; |
| for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) { |
| if (st->fast_out(i) == mm) { |
| y = st; |
| break; |
| } |
| } |
| if (y == NULL) { |
| return NULL; |
| } |
| } |
| |
| MemBarNode *mbar = NULL; |
| // ensure the merge feeds to the expected type of membar |
| for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) { |
| x = mm->fast_out(i); |
| if (x->is_MemBar()) { |
| int opcode = x->Opcode(); |
| if (opcode == Op_MemBarVolatile && st) { |
| mbar = x->as_MemBar(); |
| } else if (cas && opcode == Op_MemBarCPUOrder) { |
| MemBarNode *y = x->as_MemBar(); |
| y = child_membar(y); |
| if (y != NULL && y->Opcode() == Op_MemBarAcquire) { |
| mbar = y; |
| } |
| } |
| break; |
| } |
| } |
| |
| return mbar; |
| } |
| |
| // normal_to_leading |
| // |
| // graph traversal helper which detects the normal case Mem feed |
| // from either a card mark or a trailing membar to a preceding |
| // release membar (optionally its cpuorder child) i.e. it ensures |
| // that one or other of the following Mem flow subgraphs is present. |
| // |
| // MemBarRelease |
| // MemBarCPUOrder {leading} |
| // | \ . . . |
| // | StoreN/P[mo_release] . . . |
| // | / |
| // MergeMem |
| // | |
| // MemBarVolatile {card mark or trailing} |
| // |
| // MemBarRelease |
| // MemBarCPUOrder {leading} |
| // | \ . . . |
| // | CompareAndSwapX . . . |
| // | |
| // . . . SCMemProj |
| // \ | |
| // | MergeMem |
| // | / |
| // MemBarCPUOrder |
| // MemBarAcquire {trailing} |
| // |
| // this predicate checks for the same flow as the previous predicate |
| // but starting from the bottom rather than the top. |
| // |
| // if the configuration is present returns the cpuorder membar for |
| // preference or when absent the release membar otherwise NULL. |
| // |
| // n.b. the input membar is expected to be a MemBarVolatile or a |
| // MemBarAcquire; a MemBarVolatile need not be a card mark membar. |
| |
| MemBarNode *normal_to_leading(const MemBarNode *barrier) |
| { |
| // input must be a volatile membar |
| assert((barrier->Opcode() == Op_MemBarVolatile || |
| barrier->Opcode() == Op_MemBarAcquire), |
| "expecting a volatile or an acquire membar"); |
| Node *x; |
| bool is_cas = barrier->Opcode() == Op_MemBarAcquire; |
| |
| // if we have an acquire membar then it must be fed via a CPUOrder |
| // membar |
| |
| if (is_cas) { |
| // skip to parent barrier which must be a cpuorder |
| x = parent_membar(barrier); |
| // guard against a missing parent as well as a wrong opcode |
| if (x == NULL || x->Opcode() != Op_MemBarCPUOrder) { |
| return NULL; |
| } |
| } else { |
| // start from the supplied barrier |
| x = (Node *)barrier; |
| } |
| |
| // the Mem feed to the membar should be a merge |
| x = x->in(TypeFunc::Memory); |
| if (!x->is_MergeMem()) { |
| return NULL; |
| } |
| |
| MergeMemNode *mm = x->as_MergeMem(); |
| |
| if (is_cas) { |
| // the merge should be fed from the CAS via an SCMemProj node |
| x = NULL; |
| for (uint idx = 1; idx < mm->req(); idx++) { |
| if (mm->in(idx)->Opcode() == Op_SCMemProj) { |
| x = mm->in(idx); |
| break; |
| } |
| } |
| if (x == NULL) { |
| return NULL; |
| } |
| // check for a CAS feeding this proj |
| x = x->in(0); |
| int opcode = x->Opcode(); |
| if (!is_CAS(opcode)) { |
| return NULL; |
| } |
| // the CAS should get its mem feed from the leading membar |
| x = x->in(MemNode::Memory); |
| } else { |
| // the merge should get its Bottom mem feed from the leading membar |
| x = mm->in(Compile::AliasIdxBot); |
| } |
| |
| // ensure this is a non-control projection |
| if (!x->is_Proj() || x->is_CFG()) { |
| return NULL; |
| } |
| // if it is fed by a membar that's the one we want |
| x = x->in(0); |
| |
| if (!x->is_MemBar()) { |
| return NULL; |
| } |
| |
| MemBarNode *leading = x->as_MemBar(); |
| // reject invalid candidates |
| if (!leading_membar(leading)) { |
| return NULL; |
| } |
| |
| // ok, we have a leading membar, now for the sanity clauses |
| |
| // the leading membar must feed Mem to a releasing store or CAS |
| ProjNode *mem = leading->proj_out(TypeFunc::Memory); |
| StoreNode *st = NULL; |
| LoadStoreNode *cas = NULL; |
| for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { |
| x = mem->fast_out(i); |
| if (x->is_Store() && x->as_Store()->is_release() && x->Opcode() != Op_StoreCM) { |
| // two stores or CASes is one too many |
| if (st != NULL || cas != NULL) { |
| return NULL; |
| } |
| st = x->as_Store(); |
| } else if (is_CAS(x->Opcode())) { |
| if (st != NULL || cas != NULL) { |
| return NULL; |
| } |
| cas = x->as_LoadStore(); |
| } |
| } |
| |
| // we must have found at least one of a store or a cas |
| if (st == NULL && cas == NULL) { |
| return NULL; |
| } |
| |
| if (st == NULL) { |
| // nothing more to check |
| return leading; |
| } else { |
| // we should not have a store if we started from an acquire |
| if (is_cas) { |
| return NULL; |
| } |
| |
| // the store should feed the merge we used to get here |
| for (DUIterator_Fast imax, i = st->fast_outs(imax); i < imax; i++) { |
| if (st->fast_out(i) == mm) { |
| return leading; |
| } |
| } |
| } |
| |
| return NULL; |
| } |
| |
| // card_mark_to_trailing |
| // |
| // graph traversal helper which detects extra, non-normal Mem feed |
| // from a card mark volatile membar to a trailing membar i.e. it |
| // ensures that one of the following three GC post-write Mem flow |
| // subgraphs is present. |
| // |
| // 1) |
| // . . . |
| // | |
| // MemBarVolatile (card mark) |
| // | | |
| // | StoreCM |
| // | | |
| // | . . . |
| // Bot | / |
| // MergeMem |
| // | |
| // | |
| // MemBarVolatile {trailing} |
| // |
| // 2) |
| // MemBarRelease/CPUOrder (leading) |
| // | |
| // | |
| // |\ . . . |
| // | \ | |
| // | \ MemBarVolatile (card mark) |
| // | \ | | |
| // \ \ | StoreCM . . . |
| // \ \ | |
| // \ Phi |
| // \ / |
| // Phi . . . |
| // Bot | / |
| // MergeMem |
| // | |
| // MemBarVolatile {trailing} |
| // |
| // |
| // 3) |
| // MemBarRelease/CPUOrder (leading) |
| // | |
| // |\ |
| // | \ |
| // | \ . . . |
| // | \ | |
| // |\ \ MemBarVolatile (card mark) |
| // | \ \ | | |
| // | \ \ | StoreCM . . . |
| // | \ \ | |
| // \ \ Phi |
| // \ \ / |
| // \ Phi |
| // \ / |
| // Phi . . . |
| // Bot | / |
| // MergeMem |
| // | |
| // | |
| // MemBarVolatile {trailing} |
| // |
| // configuration 1 is only valid if UseConcMarkSweepGC && |
| // UseCondCardMark |
| // |
| // configurations 2 and 3 are only valid if UseG1GC. |
| // |
| // if a valid configuration is present returns the trailing membar |
| // otherwise NULL. |
| // |
| // n.b. the supplied membar is expected to be a card mark |
| // MemBarVolatile i.e. the caller must ensure the input node has the |
| // correct opcode and feeds Mem to a StoreCM node |
| |
| MemBarNode *card_mark_to_trailing(const MemBarNode *barrier) |
| { |
| // input must be a card mark volatile membar |
| assert(is_card_mark_membar(barrier), "expecting a card mark membar"); |
| |
| Node *feed = barrier->proj_out(TypeFunc::Memory); |
| Node *x; |
| MergeMemNode *mm = NULL; |
| |
| const int MAX_PHIS = 3; // max phis we will search through |
| int phicount = 0; // current search count |
| |
| bool retry_feed = true; |
| while (retry_feed) { |
| // see if we have a direct MergeMem feed |
| for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) { |
| x = feed->fast_out(i); |
| // stop at the first MergeMem we find |
| if (x->is_MergeMem()) { |
| mm = x->as_MergeMem(); |
| break; |
| } |
| } |
| if (mm) { |
| retry_feed = false; |
| } else if (UseG1GC && phicount++ < MAX_PHIS) { |
| // the barrier may feed indirectly via one or two Phi nodes |
| PhiNode *phi = NULL; |
| for (DUIterator_Fast imax, i = feed->fast_outs(imax); i < imax; i++) { |
| x = feed->fast_out(i); |
| // the correct Phi will be merging a Bot memory slice |
| if (x->is_Phi() && x->adr_type() == TypePtr::BOTTOM) { |
| phi = x->as_Phi(); |
| break; |
| } |
| } |
| if (!phi) { |
| return NULL; |
| } |
| // look for another merge below this phi |
| feed = phi; |
| } else { |
| // couldn't find a merge |
| return NULL; |
| } |
| } |
| |
| // sanity check this feed turns up as the expected slice |
| assert(mm->in(Compile::AliasIdxBot) == feed, "expecting membar to feed AliasIdxBot slice to Merge"); |
| |
| MemBarNode *trailing = NULL; |
| // be sure we have a trailing membar fed by the merge |
| for (DUIterator_Fast imax, i = mm->fast_outs(imax); i < imax; i++) { |
| x = mm->fast_out(i); |
| if (x->is_MemBar() && x->Opcode() == Op_MemBarVolatile) { |
| trailing = x->as_MemBar(); |
| break; |
| } |
| } |
| |
| return trailing; |
| } |
| |
| // trailing_to_card_mark |
| // |
| // graph traversal helper which detects extra, non-normal Mem feed |
| // from a trailing volatile membar to a preceding card mark volatile |
| // membar i.e. it identifies whether one of the three possible extra |
| // GC post-write Mem flow subgraphs is present |
| // |
| // this predicate checks for the same flow as the previous predicate |
| // but starting from the bottom rather than the top. |
| // |
| // if the configuration is present returns the card mark membar |
| // otherwise NULL |
| // |
| // n.b. the supplied membar is expected to be a trailing |
| // MemBarVolatile i.e. the caller must ensure the input node has the |
| // correct opcode |
| |
| MemBarNode *trailing_to_card_mark(const MemBarNode *trailing) |
| { |
| assert(trailing->Opcode() == Op_MemBarVolatile, |
| "expecting a volatile membar"); |
| assert(!is_card_mark_membar(trailing), |
| "not expecting a card mark membar"); |
| |
| // the Mem feed to the membar should be a merge |
| Node *x = trailing->in(TypeFunc::Memory); |
| if (!x->is_MergeMem()) { |
| return NULL; |
| } |
| |
| MergeMemNode *mm = x->as_MergeMem(); |
| |
| x = mm->in(Compile::AliasIdxBot); |
| // with G1 we may possibly see a Phi or two before we see a Memory |
| // Proj from the card mark membar |
| |
| const int MAX_PHIS = 3; // max phis we will search through |
| int phicount = 0; // current search count |
| |
| bool retry_feed = !x->is_Proj(); |
| |
| while (retry_feed) { |
| if (UseG1GC && x->is_Phi() && phicount++ < MAX_PHIS) { |
| PhiNode *phi = x->as_Phi(); |
| ProjNode *proj = NULL; |
| PhiNode *nextphi = NULL; |
| bool found_leading = false; |
| for (uint i = 1; i < phi->req(); i++) { |
| x = phi->in(i); |
| if (x->is_Phi()) { |
| nextphi = x->as_Phi(); |
| } else if (x->is_Proj()) { |
| int opcode = x->in(0)->Opcode(); |
| if (opcode == Op_MemBarVolatile) { |
| proj = x->as_Proj(); |
| } else if (opcode == Op_MemBarRelease || |
| opcode == Op_MemBarCPUOrder) { |
| // probably a leading membar |
| found_leading = true; |
| } |
| } |
| } |
| // if we found a correct looking proj then retry from there |
| // otherwise we must see a leading membar and a phi or this is |
| // the wrong config |
| if (proj != NULL) { |
| x = proj; |
| retry_feed = false; |
| } else if (found_leading && nextphi != NULL) { |
| // retry from this phi to check phi2 |
| x = nextphi; |
| } else { |
| // not what we were looking for |
| return NULL; |
| } |
| } else { |
| return NULL; |
| } |
| } |
| // the proj has to come from the card mark membar |
| x = x->in(0); |
| if (!x->is_MemBar()) { |
| return NULL; |
| } |
| |
| MemBarNode *card_mark_membar = x->as_MemBar(); |
| |
| if (!is_card_mark_membar(card_mark_membar)) { |
| return NULL; |
| } |
| |
| return card_mark_membar; |
| } |
| |
| // trailing_to_leading |
| // |
| // graph traversal helper which checks the Mem flow up the graph |
| // from a (non-card mark) trailing membar attempting to locate and |
| // return an associated leading membar. it first looks for a |
| // subgraph in the normal configuration (relying on helper |
| // normal_to_leading). failing that it then looks for one of the |
| // possible post-write card mark subgraphs linking the trailing node |
| // to the card mark membar (relying on helper |
| // trailing_to_card_mark), and then checks that the card mark membar |
| // is fed by a leading membar (once again relying on auxiliary |
| // predicate normal_to_leading). |
| // |
| // if the configuration is valid returns the cpuorder membar for |
| // preference or when absent the release membar otherwise NULL. |
| // |
| // n.b. the input membar is expected to be either a volatile or |
| // acquire membar but in the former case must *not* be a card mark |
| // membar. |
| |
| MemBarNode *trailing_to_leading(const MemBarNode *trailing) |
| { |
| assert((trailing->Opcode() == Op_MemBarAcquire || |
| trailing->Opcode() == Op_MemBarVolatile), |
| "expecting an acquire or volatile membar"); |
| assert((trailing->Opcode() != Op_MemBarVolatile || |
| !is_card_mark_membar(trailing)), |
| "not expecting a card mark membar"); |
| |
| MemBarNode *leading = normal_to_leading(trailing); |
| |
| if (leading) { |
| return leading; |
| } |
| |
| // nothing more to do if this is an acquire |
| if (trailing->Opcode() == Op_MemBarAcquire) { |
| return NULL; |
| } |
| |
| MemBarNode *card_mark_membar = trailing_to_card_mark(trailing); |
| |
| if (!card_mark_membar) { |
| return NULL; |
| } |
| |
| return normal_to_leading(card_mark_membar); |
| } |
| |
| // predicates controlling emit of ldr<x>/ldar<x> and associated dmb |
| |
| bool unnecessary_acquire(const Node *barrier) |
| { |
| assert(barrier->is_MemBar(), "expecting a membar"); |
| |
| if (UseBarriersForVolatile) { |
| // we need to plant a dmb |
| return false; |
| } |
| |
| // a volatile read derived from bytecode (or also from an inlined |
| // SHA field read via LibraryCallKit::load_field_from_object) |
| // manifests as a LoadX[mo_acquire] followed by an acquire membar |
| // with a bogus read dependency on its preceding load. so in those |
| // cases we will find the load node at the PARMS offset of the |
| // acquire membar. n.b. there may be an intervening DecodeN node. |
| // |
| // a volatile load derived from an inlined unsafe field access |
| // manifests as a cpuorder membar with Ctl and Mem projections |
| // feeding both an acquire membar and a LoadX[mo_acquire]. The |
| // acquire then feeds another cpuorder membar via Ctl and Mem |
| // projections. The load has no output dependency on these trailing |
| // membars because subsequent nodes inserted into the graph take |
| // their control feed from the final cpuorder membar, meaning they |
| // are all ordered after the load. |
| |
| Node *x = barrier->lookup(TypeFunc::Parms); |
| if (x) { |
| // we are starting from an acquire and it has a fake dependency |
| // |
| // need to check for |
| // |
| // LoadX[mo_acquire] |
| // { |1 } |
| // {DecodeN} |
| // |Parms |
| // MemBarAcquire* |
| // |
| // where * tags node we were passed |
| // and |k means input k |
| if (x->is_DecodeNarrowPtr()) { |
| x = x->in(1); |
| } |
| |
| return (x->is_Load() && x->as_Load()->is_acquire()); |
| } |
| |
| // now check for an unsafe volatile get |
| |
| // need to check for |
| // |
| // MemBarCPUOrder |
| // || \\ |
| // MemBarAcquire* LoadX[mo_acquire] |
| // || |
| // MemBarCPUOrder |
| // |
| // where * tags node we were passed |
| // and || or \\ are Ctl+Mem feeds via intermediate Proj Nodes |
| |
| // check for a parent MemBarCPUOrder |
| ProjNode *ctl; |
| ProjNode *mem; |
| MemBarNode *parent = parent_membar(barrier); |
| if (!parent || parent->Opcode() != Op_MemBarCPUOrder) |
| return false; |
| ctl = parent->proj_out(TypeFunc::Control); |
| mem = parent->proj_out(TypeFunc::Memory); |
| if (!ctl || !mem) { |
| return false; |
| } |
| // ensure the proj nodes both feed a LoadX[mo_acquire] |
| LoadNode *ld = NULL; |
| for (DUIterator_Fast imax, i = ctl->fast_outs(imax); i < imax; i++) { |
| x = ctl->fast_out(i); |
| // if we see a load we keep hold of it and stop searching |
| if (x->is_Load()) { |
| ld = x->as_Load(); |
| break; |
| } |
| } |
| // it must be an acquiring load |
| if (ld && ld->is_acquire()) { |
| |
| for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) { |
| x = mem->fast_out(i); |
| // if we see the same load we drop it and stop searching |
| if (x == ld) { |
| ld = NULL; |
| break; |
| } |
| } |
| // we must have dropped the load |
| if (ld == NULL) { |
| // check for a child cpuorder membar |
| MemBarNode *child = child_membar(barrier->as_MemBar()); |
| if (child && child->Opcode() == Op_MemBarCPUOrder) |
| return true; |
| } |
| } |
| |
| // the final option for an unnecessary membar is that it is a |
| // trailing node belonging to a CAS |
| |
| MemBarNode *leading = trailing_to_leading(barrier->as_MemBar()); |
| |
| return leading != NULL; |
| } |
| |
| bool needs_acquiring_load(const Node *n) |
| { |
| assert(n->is_Load(), "expecting a load"); |
| if (UseBarriersForVolatile) { |
| // we use a normal load and a dmb |
| return false; |
| } |
| |
| LoadNode *ld = n->as_Load(); |
| |
| if (!ld->is_acquire()) { |
| return false; |
| } |
| |
| // check if this load is feeding an acquire membar |
| // |
| // LoadX[mo_acquire] |
| // { |1 } |
| // {DecodeN} |
| // |Parms |
| // MemBarAcquire* |
| // |
| // where * tags node we were passed |
| // and |k means input k |
| |
| Node *start = ld; |
| Node *mbacq = NULL; |
| |
| // if we hit a DecodeNarrowPtr we reset the start node and restart |
| // the search through the outputs |
| restart: |
| |
| for (DUIterator_Fast imax, i = start->fast_outs(imax); i < imax; i++) { |
| Node *x = start->fast_out(i); |
| if (x->is_MemBar() && x->Opcode() == Op_MemBarAcquire) { |
| mbacq = x; |
| } else if (!mbacq && |
| (x->is_DecodeNarrowPtr() || |
| (x->is_Mach() && x->Opcode() == Op_DecodeN))) { |
| start = x; |
| goto restart; |
| } |
| } |
| |
| if (mbacq) { |
| return true; |
| } |
| |
| // now check for an unsafe volatile get |
| |
| // check if the Ctl and Mem feeds come from a MemBarCPUOrder |
| // |
| // MemBarCPUOrder |
| // || \\ |
| // MemBarAcquire* LoadX[mo_acquire] |
| // || |
| // MemBarCPUOrder |
| |
| MemBarNode *membar; |
| |
| membar = parent_membar(ld); |
| |
| if (!membar || membar->Opcode() != Op_MemBarCPUOrder) { |
| return false; |
| } |
| |
| // ensure that there is a CPUOrder->Acquire->CPUOrder membar chain |
| |
| membar = child_membar(membar); |
| |
| if (!membar || membar->Opcode() != Op_MemBarAcquire) { |
| return false; |
| } |
| |
| membar = child_membar(membar); |
| |
| if (!membar || membar->Opcode() != Op_MemBarCPUOrder) { |
| return false; |
| } |
| |
| return true; |
| } |
| |
| bool unnecessary_release(const Node *n) |
| { |
| assert((n->is_MemBar() && |
| n->Opcode() == Op_MemBarRelease), |
| "expecting a release membar"); |
| |
| if (UseBarriersForVolatile) { |
| // we need to plant a dmb |
| return false; |
| } |
| |
| // if there is a dependent CPUOrder barrier then use that as the |
| // leading |
| |
| MemBarNode *barrier = n->as_MemBar(); |
| // check for an intervening cpuorder membar |
| MemBarNode *b = child_membar(barrier); |
| if (b && b->Opcode() == Op_MemBarCPUOrder) { |
| // ok, so start the check from the dependent cpuorder barrier |
| barrier = b; |
| } |
| |
| // must start with a normal feed |
| MemBarNode *child_barrier = leading_to_normal(barrier); |
| |
| if (!child_barrier) { |
| return false; |
| } |
| |
| if (!is_card_mark_membar(child_barrier)) { |
| // this is the trailing membar and we are done |
| return true; |
| } |
| |
| // must be sure this card mark feeds a trailing membar |
| MemBarNode *trailing = card_mark_to_trailing(child_barrier); |
| return (trailing != NULL); |
| } |
| |
| bool unnecessary_volatile(const Node *n) |
| { |
| // assert n->is_MemBar(); |
| if (UseBarriersForVolatile) { |
| // we need to plant a dmb |
| return false; |
| } |
| |
| MemBarNode *mbvol = n->as_MemBar(); |
| |
| // first we check if this is part of a card mark. if so then we have |
| // to generate a StoreLoad barrier |
| |
| if (is_card_mark_membar(mbvol)) { |
| return false; |
| } |
| |
| // ok, if it's not a card mark then we still need to check if it is |
| // a trailing membar of a volatile put graph. |
| |
| return (trailing_to_leading(mbvol) != NULL); |
| } |
| |
| // predicates controlling emit of str<x>/stlr<x> and associated dmbs |
| |
| bool needs_releasing_store(const Node *n) |
| { |
| // assert n->is_Store(); |
| if (UseBarriersForVolatile) { |
| // we use a normal store and dmb combination |
| return false; |
| } |
| |
| StoreNode *st = n->as_Store(); |
| |
| // the store must be marked as releasing |
| if (!st->is_release()) { |
| return false; |
| } |
| |
| // the store must be fed by a membar |
| |
| Node *x = st->lookup(StoreNode::Memory); |
| |
| if (!x || !x->is_Proj()) { |
| return false; |
| } |
| |
| ProjNode *proj = x->as_Proj(); |
| |
| x = proj->lookup(0); |
| |
| if (!x || !x->is_MemBar()) { |
| return false; |
| } |
| |
| MemBarNode *barrier = x->as_MemBar(); |
| |
| // if the barrier is a release membar or a cpuorder membar fed by a |
| // release membar then we need to check whether that forms part of a |
| // volatile put graph. |
| |
| // reject invalid candidates |
| if (!leading_membar(barrier)) { |
| return false; |
| } |
| |
| // does this lead a normal subgraph? |
| MemBarNode *mbvol = leading_to_normal(barrier); |
| |
| if (!mbvol) { |
| return false; |
| } |
| |
| // all done unless this is a card mark |
| if (!is_card_mark_membar(mbvol)) { |
| return true; |
| } |
| |
| // we found a card mark -- just make sure we have a trailing barrier |
| |
| return (card_mark_to_trailing(mbvol) != NULL); |
| } |
| |
| // predicate controlling translation of CAS |
| // |
| // returns true if CAS needs to use an acquiring load otherwise false |
| |
| bool needs_acquiring_load_exclusive(const Node *n) |
| { |
| assert(is_CAS(n->Opcode()), "expecting a compare and swap"); |
| if (UseBarriersForVolatile) { |
| return false; |
| } |
| |
| // CAS nodes only ought to turn up in inlined unsafe CAS operations |
| #ifdef ASSERT |
| LoadStoreNode *st = n->as_LoadStore(); |
| |
| // the store must be fed by a membar |
| |
| Node *x = st->lookup(StoreNode::Memory); |
| |
| assert (x && x->is_Proj(), "CAS not fed by memory proj!"); |
| |
| ProjNode *proj = x->as_Proj(); |
| |
| x = proj->lookup(0); |
| |
| assert (x && x->is_MemBar(), "CAS not fed by membar!"); |
| |
| MemBarNode *barrier = x->as_MemBar(); |
| |
| // the barrier must be a cpuorder membar fed by a release membar |
| |
| assert(barrier->Opcode() == Op_MemBarCPUOrder, |
| "CAS not fed by cpuorder membar!"); |
| |
| MemBarNode *b = parent_membar(barrier); |
| assert ((b != NULL && b->Opcode() == Op_MemBarRelease), |
| "CAS not fed by cpuorder+release membar pair!"); |
| |
| // does this lead a normal subgraph? |
| MemBarNode *mbar = leading_to_normal(barrier); |
| |
| assert(mbar != NULL, "CAS not embedded in normal graph!"); |
| |
| assert(mbar->Opcode() == Op_MemBarAcquire, "trailing membar should be an acquire"); |
| #endif // ASSERT |
| // so we can just return true here |
| return true; |
| } |
| |
| // predicate controlling translation of StoreCM |
| // |
| // returns true if the card write does not need to be preceded by a |
| // StoreStore barrier (dmb ishst), otherwise false |
| |
| bool unnecessary_storestore(const Node *storecm) |
| { |
| assert(storecm->Opcode() == Op_StoreCM, "expecting a StoreCM"); |
| |
| // we only ever need to generate a dmb ishst between an object put |
| // and the associated card mark when we are using CMS without |
| // conditional card marking |
| |
| if (!UseConcMarkSweepGC || UseCondCardMark) { |
| return true; |
| } |
| |
| // if we are implementing volatile puts using barriers then the |
| // object put is implemented as an str so we must insert the dmb |
| // ishst |
| |
| if (UseBarriersForVolatile) { |
| return false; |
| } |
| |
| // we can omit the dmb ishst if this StoreCM is part of a volatile |
| // put because in that case the put will be implemented by stlr |
| // |
| // we need to check for a normal subgraph feeding this StoreCM. |
| // that means the StoreCM must be fed Memory from a leading membar, |
| // either a MemBarRelease or its dependent MemBarCPUOrder, and the |
| // leading membar must be part of a normal subgraph |
| |
| Node *x = storecm->in(StoreNode::Memory); |
| |
| if (!x->is_Proj()) { |
| return false; |
| } |
| |
| x = x->in(0); |
| |
| if (!x->is_MemBar()) { |
| return false; |
| } |
| |
| MemBarNode *leading = x->as_MemBar(); |
| |
| // reject invalid candidates |
| if (!leading_membar(leading)) { |
| return false; |
| } |
| |
| // we can omit the StoreStore if it is the head of a normal subgraph |
| return (leading_to_normal(leading) != NULL); |
| } |
| |
| |
| #define __ _masm. |
| |
| // forward declarations for helper functions to convert register |
| // indices to register objects |
| |
| // the ad file has to provide implementations of certain methods |
| // expected by the generic code |
| // |
| // REQUIRED FUNCTIONALITY |
| |
| //============================================================================= |
| |
| // !!!!! Special hack to get all types of calls to specify the byte offset |
| // from the start of the call to the point where the return address |
| // will point. |
| |
| int MachCallStaticJavaNode::ret_addr_offset() |
| { |
| // call should be a simple bl |
| int off = 4; |
| return off; |
| } |
| |
| int MachCallDynamicJavaNode::ret_addr_offset() |
| { |
| return 16; // movz, movk, movk, bl |
| } |
| |
| int MachCallRuntimeNode::ret_addr_offset() { |
| // for generated stubs the call will be |
| // far_call(addr) |
| // for real runtime callouts it will be six instructions |
| // see aarch64_enc_java_to_runtime |
| // adr(rscratch2, retaddr) |
| // lea(rscratch1, RuntimeAddress(addr)) |
| // stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize))) |
| // blrt rscratch1 |
| CodeBlob *cb = CodeCache::find_blob(_entry_point); |
| if (cb) { |
| return MacroAssembler::far_branch_size(); |
| } else { |
| return 6 * NativeInstruction::instruction_size; |
| } |
| } |
| |
| // Indicate if the safepoint node needs the polling page as an input |
| |
| // the shared code plants the oop data at the start of the generated |
| // code for the safepoint node and that needs to be at the load |
| // instruction itself. so we cannot plant a mov of the safepoint poll |
| // address followed by a load. setting this to true means the mov is |
| // scheduled as a prior instruction. that's better for scheduling |
| // anyway. |
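| // |
| // i.e. with this returning true the poll is emitted as two separate |
| // instructions, roughly (cf. the epilog format below): |
| // |
| //   mov rscratch1, #<polling_page>   // may be scheduled earlier |
| //   ldr zr, [rscratch1]              // oop map data attaches here |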
| |
| bool SafePointNode::needs_polling_address_input() |
| { |
| return true; |
| } |
| |
| //============================================================================= |
| |
| #ifndef PRODUCT |
| void MachBreakpointNode::format(PhaseRegAlloc *ra_, outputStream *st) const { |
| st->print("BREAKPOINT"); |
| } |
| #endif |
| |
| void MachBreakpointNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { |
| MacroAssembler _masm(&cbuf); |
| __ brk(0); |
| } |
| |
| uint MachBreakpointNode::size(PhaseRegAlloc *ra_) const { |
| return MachNode::size(ra_); |
| } |
| |
| //============================================================================= |
| |
| #ifndef PRODUCT |
| void MachNopNode::format(PhaseRegAlloc*, outputStream* st) const { |
| st->print("nop \t# %d bytes pad for loops and calls", _count); |
| } |
| #endif |
| |
| void MachNopNode::emit(CodeBuffer &cbuf, PhaseRegAlloc*) const { |
| MacroAssembler _masm(&cbuf); |
| for (int i = 0; i < _count; i++) { |
| __ nop(); |
| } |
| } |
| |
| uint MachNopNode::size(PhaseRegAlloc*) const { |
| return _count * NativeInstruction::instruction_size; |
| } |
| |
| //============================================================================= |
| const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty; |
| |
| int Compile::ConstantTable::calculate_table_base_offset() const { |
| return 0; // absolute addressing, no offset |
| } |
| |
| bool MachConstantBaseNode::requires_postalloc_expand() const { return false; } |
| void MachConstantBaseNode::postalloc_expand(GrowableArray <Node *> *nodes, PhaseRegAlloc *ra_) { |
| ShouldNotReachHere(); |
| } |
| |
| void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const { |
| // Empty encoding |
| } |
| |
| uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const { |
| return 0; |
| } |
| |
| #ifndef PRODUCT |
| void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const { |
| st->print("-- \t// MachConstantBaseNode (empty encoding)"); |
| } |
| #endif |
| |
| #ifndef PRODUCT |
| void MachPrologNode::format(PhaseRegAlloc *ra_, outputStream *st) const { |
| Compile* C = ra_->C; |
| |
| int framesize = C->frame_slots() << LogBytesPerInt; |
| |
| if (C->need_stack_bang(framesize)) |
| st->print("# stack bang size=%d\n\t", framesize); |
| |
| if (framesize < ((1 << 9) + 2 * wordSize)) { |
| st->print("sub sp, sp, #%d\n\t", framesize); |
| st->print("stp rfp, lr, [sp, #%d]", framesize - 2 * wordSize); |
| if (PreserveFramePointer) st->print("\n\tadd rfp, sp, #%d", framesize - 2 * wordSize); |
| } else { |
| st->print("stp lr, rfp, [sp, #%d]!\n\t", -(2 * wordSize)); |
| if (PreserveFramePointer) st->print("mov rfp, sp\n\t"); |
| st->print("mov rscratch1, #%d\n\t", framesize - 2 * wordSize); |
| st->print("sub sp, sp, rscratch1"); |
| } |
| } |
| #endif |
| |
| void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { |
| Compile* C = ra_->C; |
| MacroAssembler _masm(&cbuf); |
| |
| // n.b. frame size includes space for return pc and rfp |
| const long framesize = C->frame_size_in_bytes(); |
| assert(framesize%(2*wordSize) == 0, "must preserve 2*wordSize alignment"); |
| |
| // insert a nop at the start of the prolog so we can patch in a |
| // branch if we need to invalidate the method later |
| __ nop(); |
| |
| int bangsize = C->bang_size_in_bytes(); |
| if (C->need_stack_bang(bangsize) && UseStackBanging) |
| __ generate_stack_overflow_check(bangsize); |
| |
| __ build_frame(framesize); |
| |
| if (NotifySimulator) { |
| __ notify(Assembler::method_entry); |
| } |
| |
| if (VerifyStackAtCalls) { |
| Unimplemented(); |
| } |
| |
| C->set_frame_complete(cbuf.insts_size()); |
| |
| if (C->has_mach_constant_base_node()) { |
| // NOTE: We set the table base offset here because users might be |
| // emitted before MachConstantBaseNode. |
| Compile::ConstantTable& constant_table = C->constant_table(); |
| constant_table.set_table_base_offset(constant_table.calculate_table_base_offset()); |
| } |
| } |
| |
| uint MachPrologNode::size(PhaseRegAlloc* ra_) const |
| { |
| return MachNode::size(ra_); // too many variables; just compute it |
| // the hard way |
| } |
| |
| int MachPrologNode::reloc() const |
| { |
| return 0; |
| } |
| |
| //============================================================================= |
| |
| #ifndef PRODUCT |
| void MachEpilogNode::format(PhaseRegAlloc *ra_, outputStream *st) const { |
| Compile* C = ra_->C; |
| int framesize = C->frame_slots() << LogBytesPerInt; |
| |
| st->print("# pop frame %d\n\t",framesize); |
| |
| if (framesize == 0) { |
| st->print("ldp lr, rfp, [sp],#%d\n\t", (2 * wordSize)); |
| } else if (framesize < ((1 << 9) + 2 * wordSize)) { |
| st->print("ldp lr, rfp, [sp,#%d]\n\t", framesize - 2 * wordSize); |
| st->print("add sp, sp, #%d\n\t", framesize); |
| } else { |
| st->print("mov rscratch1, #%d\n\t", framesize - 2 * wordSize); |
| st->print("add sp, sp, rscratch1\n\t"); |
| st->print("ldp lr, rfp, [sp],#%d\n\t", (2 * wordSize)); |
| } |
| |
| if (do_polling() && C->is_method_compilation()) { |
| st->print("# touch polling page\n\t"); |
| st->print("mov rscratch1, #0x%lx\n\t", p2i(os::get_polling_page())); |
| st->print("ldr zr, [rscratch1]"); |
| } |
| } |
| #endif |
| |
| void MachEpilogNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { |
| Compile* C = ra_->C; |
| MacroAssembler _masm(&cbuf); |
| int framesize = C->frame_slots() << LogBytesPerInt; |
| |
| __ remove_frame(framesize); |
| |
| if (NotifySimulator) { |
| __ notify(Assembler::method_reentry); |
| } |
| |
| if (StackReservedPages > 0 && C->has_reserved_stack_access()) { |
| __ reserved_stack_check(); |
| } |
| |
| if (do_polling() && C->is_method_compilation()) { |
| __ read_polling_page(rscratch1, os::get_polling_page(), relocInfo::poll_return_type); |
| } |
| } |
| |
| uint MachEpilogNode::size(PhaseRegAlloc *ra_) const { |
| // Variable size. Determine dynamically. |
| return MachNode::size(ra_); |
| } |
| |
| int MachEpilogNode::reloc() const { |
| // Return number of relocatable values contained in this instruction. |
| return 1; // 1 for polling page. |
| } |
| |
| const Pipeline * MachEpilogNode::pipeline() const { |
| return MachNode::pipeline_class(); |
| } |
| |
| // This method seems to be obsolete. It is declared in machnode.hpp |
| // and defined in all *.ad files, but it is never called. Should we |
| // get rid of it? |
| int MachEpilogNode::safepoint_offset() const { |
| assert(do_polling(), "no return for this epilog node"); |
| return 4; |
| } |
| |
| //============================================================================= |
| |
| // Figure out which register class each belongs in: rc_int, rc_float or |
| // rc_stack. |
| enum RC { rc_bad, rc_int, rc_float, rc_stack }; |
| |
| static enum RC rc_class(OptoReg::Name reg) { |
| |
| if (reg == OptoReg::Bad) { |
| return rc_bad; |
| } |
| |
| // we have 30 int registers * 2 halves |
| // (rscratch1 and rscratch2 are omitted) |
| |
| if (reg < 60) { |
| return rc_int; |
| } |
| |
| // we have 32 float registers * 4 slots each |
| if (reg < 60 + 128) { |
| return rc_float; |
| } |
| |
| // Between the float regs & stack are the flags regs. |
| assert(OptoReg::is_stack(reg), "blow up if spilling flags"); |
| |
| return rc_stack; |
| } |
| |
| uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, bool do_size, outputStream *st) const { |
| Compile* C = ra_->C; |
| |
| // Get registers to move. |
| OptoReg::Name src_hi = ra_->get_reg_second(in(1)); |
| OptoReg::Name src_lo = ra_->get_reg_first(in(1)); |
| OptoReg::Name dst_hi = ra_->get_reg_second(this); |
| OptoReg::Name dst_lo = ra_->get_reg_first(this); |
| |
| enum RC src_hi_rc = rc_class(src_hi); |
| enum RC src_lo_rc = rc_class(src_lo); |
| enum RC dst_hi_rc = rc_class(dst_hi); |
| enum RC dst_lo_rc = rc_class(dst_lo); |
| |
| assert(src_lo != OptoReg::Bad && dst_lo != OptoReg::Bad, "must move at least 1 register"); |
| |
| if (src_hi != OptoReg::Bad) { |
| assert((src_lo&1)==0 && src_lo+1==src_hi && |
| (dst_lo&1)==0 && dst_lo+1==dst_hi, |
| "expected aligned-adjacent pairs"); |
| } |
| |
| if (src_lo == dst_lo && src_hi == dst_hi) { |
| return 0; // Self copy, no move. |
| } |
| |
| bool is64 = (src_lo & 1) == 0 && src_lo + 1 == src_hi && |
| (dst_lo & 1) == 0 && dst_lo + 1 == dst_hi; |
| int src_offset = ra_->reg2offset(src_lo); |
| int dst_offset = ra_->reg2offset(dst_lo); |
| |
| if (bottom_type()->isa_vect() != NULL) { |
| uint ireg = ideal_reg(); |
| assert(ireg == Op_VecD || ireg == Op_VecX, "must be 64 bit or 128 bit vector"); |
| if (cbuf) { |
| MacroAssembler _masm(cbuf); |
| assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity"); |
| if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) { |
| // stack->stack |
| assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset"); |
| if (ireg == Op_VecD) { |
| __ unspill(rscratch1, true, src_offset); |
| __ spill(rscratch1, true, dst_offset); |
| } else { |
| __ spill_copy128(src_offset, dst_offset); |
| } |
| } else if (src_lo_rc == rc_float && dst_lo_rc == rc_float) { |
| __ mov(as_FloatRegister(Matcher::_regEncode[dst_lo]), |
| ireg == Op_VecD ? __ T8B : __ T16B, |
| as_FloatRegister(Matcher::_regEncode[src_lo])); |
| } else if (src_lo_rc == rc_float && dst_lo_rc == rc_stack) { |
| __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]), |
| ireg == Op_VecD ? __ D : __ Q, |
| ra_->reg2offset(dst_lo)); |
| } else if (src_lo_rc == rc_stack && dst_lo_rc == rc_float) { |
| __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]), |
| ireg == Op_VecD ? __ D : __ Q, |
| ra_->reg2offset(src_lo)); |
| } else { |
| ShouldNotReachHere(); |
| } |
| } |
| } else if (cbuf) { |
| MacroAssembler _masm(cbuf); |
| switch (src_lo_rc) { |
| case rc_int: |
| if (dst_lo_rc == rc_int) { // gpr --> gpr copy |
| if (is64) { |
| __ mov(as_Register(Matcher::_regEncode[dst_lo]), |
| as_Register(Matcher::_regEncode[src_lo])); |
| } else { |
| __ movw(as_Register(Matcher::_regEncode[dst_lo]), |
| as_Register(Matcher::_regEncode[src_lo])); |
| } |
| } else if (dst_lo_rc == rc_float) { // gpr --> fpr copy |
| if (is64) { |
| __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]), |
| as_Register(Matcher::_regEncode[src_lo])); |
| } else { |
| __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]), |
| as_Register(Matcher::_regEncode[src_lo])); |
| } |
| } else { // gpr --> stack spill |
| assert(dst_lo_rc == rc_stack, "spill to bad register class"); |
| __ spill(as_Register(Matcher::_regEncode[src_lo]), is64, dst_offset); |
| } |
| break; |
| case rc_float: |
| if (dst_lo_rc == rc_int) { // fpr --> gpr copy |
| if (is64) { |
| __ fmovd(as_Register(Matcher::_regEncode[dst_lo]), |
| as_FloatRegister(Matcher::_regEncode[src_lo])); |
| } else { |
| __ fmovs(as_Register(Matcher::_regEncode[dst_lo]), |
| as_FloatRegister(Matcher::_regEncode[src_lo])); |
| } |
| } else if (dst_lo_rc == rc_float) { // fpr --> fpr copy |
| if (is64) { |
| __ fmovd(as_FloatRegister(Matcher::_regEncode[dst_lo]), |
| as_FloatRegister(Matcher::_regEncode[src_lo])); |
| } else { |
| __ fmovs(as_FloatRegister(Matcher::_regEncode[dst_lo]), |
| as_FloatRegister(Matcher::_regEncode[src_lo])); |
| } |
| } else { // fpr --> stack spill |
| assert(dst_lo_rc == rc_stack, "spill to bad register class"); |
| __ spill(as_FloatRegister(Matcher::_regEncode[src_lo]), |
| is64 ? __ D : __ S, dst_offset); |
| } |
| break; |
| case rc_stack: |
| if (dst_lo_rc == rc_int) { // stack --> gpr load |
| __ unspill(as_Register(Matcher::_regEncode[dst_lo]), is64, src_offset); |
| } else if (dst_lo_rc == rc_float) { // stack --> fpr load |
| __ unspill(as_FloatRegister(Matcher::_regEncode[dst_lo]), |
| is64 ? __ D : __ S, src_offset); |
| } else { // stack --> stack copy |
| assert(dst_lo_rc == rc_stack, "spill to bad register class"); |
| __ unspill(rscratch1, is64, src_offset); |
| __ spill(rscratch1, is64, dst_offset); |
| } |
| break; |
| default: |
| assert(false, "bad rc_class for spill"); |
| ShouldNotReachHere(); |
| } |
| } |
| |
| if (st) { |
| st->print("spill "); |
| if (src_lo_rc == rc_stack) { |
| st->print("[sp, #%d] -> ", ra_->reg2offset(src_lo)); |
| } else { |
| st->print("%s -> ", Matcher::regName[src_lo]); |
| } |
| if (dst_lo_rc == rc_stack) { |
| st->print("[sp, #%d]", ra_->reg2offset(dst_lo)); |
| } else { |
| st->print("%s", Matcher::regName[dst_lo]); |
| } |
| if (bottom_type()->isa_vect() != NULL) { |
| st->print("\t# vector spill size = %d", ideal_reg()==Op_VecD ? 64:128); |
| } else { |
| st->print("\t# spill size = %d", is64 ? 64:32); |
| } |
| } |
| |
| return 0; |
| |
| } |
| |
| #ifndef PRODUCT |
| void MachSpillCopyNode::format(PhaseRegAlloc *ra_, outputStream *st) const { |
| if (!ra_) |
| st->print("N%d = SpillCopy(N%d)", _idx, in(1)->_idx); |
| else |
| implementation(NULL, ra_, false, st); |
| } |
| #endif |
| |
| void MachSpillCopyNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { |
| implementation(&cbuf, ra_, false, NULL); |
| } |
| |
| uint MachSpillCopyNode::size(PhaseRegAlloc *ra_) const { |
| return MachNode::size(ra_); |
| } |
| |
| //============================================================================= |
| |
| #ifndef PRODUCT |
| void BoxLockNode::format(PhaseRegAlloc *ra_, outputStream *st) const { |
| int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); |
| int reg = ra_->get_reg_first(this); |
| st->print("add %s, rsp, #%d]\t# box lock", |
| Matcher::regName[reg], offset); |
| } |
| #endif |
| |
| void BoxLockNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const { |
| MacroAssembler _masm(&cbuf); |
| |
| int offset = ra_->reg2offset(in_RegMask(0).find_first_elem()); |
| int reg = ra_->get_encode(this); |
| |
| if (Assembler::operand_valid_for_add_sub_immediate(offset)) { |
| __ add(as_Register(reg), sp, offset); |
| } else { |
| ShouldNotReachHere(); |
| } |
| } |
| |
| uint BoxLockNode::size(PhaseRegAlloc *ra_) const { |
| // BoxLockNode is not a MachNode, so we can't just call MachNode::size(ra_). |
| return 4; |
| } |
| |
| //============================================================================= |
| |
| #ifndef PRODUCT |
| void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const |
| { |
| st->print_cr("# MachUEPNode"); |
| if (UseCompressedClassPointers) { |
| st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); |
| if (Universe::narrow_klass_shift() != 0) { |
| st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1"); |
| } |
| } else { |
| st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass"); |
| } |
| st->print_cr("\tcmp r0, rscratch1\t # Inline cache check"); |
| st->print_cr("\tbne, SharedRuntime::_ic_miss_stub"); |
| } |
| #endif |
| |
| void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const |
| { |
| // This is the unverified entry point. |
| MacroAssembler _masm(&cbuf); |
| |
| __ cmp_klass(j_rarg0, rscratch2, rscratch1); |
| Label skip; |
| // TODO |
| // can we avoid this skip and still use a reloc? |
| __ br(Assembler::EQ, skip); |
| __ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub())); |
| __ bind(skip); |
| } |
| |
| uint MachUEPNode::size(PhaseRegAlloc* ra_) const |
| { |
| return MachNode::size(ra_); |
| } |
| |
| // REQUIRED EMIT CODE |
| |
| //============================================================================= |
| |
| // Emit exception handler code. |
| int HandlerImpl::emit_exception_handler(CodeBuffer& cbuf) |
| { |
| // mov rscratch1 #exception_blob_entry_point |
| // br rscratch1 |
| // Note that the code buffer's insts_mark is always relative to insts. |
| // That's why we must use the macroassembler to generate a handler. |
| MacroAssembler _masm(&cbuf); |
| address base = __ start_a_stub(size_exception_handler()); |
| if (base == NULL) { |
| ciEnv::current()->record_failure("CodeCache is full"); |
| return 0; // CodeBuffer::expand failed |
| } |
| int offset = __ offset(); |
| __ far_jump(RuntimeAddress(OptoRuntime::exception_blob()->entry_point())); |
| assert(__ offset() - offset <= (int) size_exception_handler(), "overflow"); |
| __ end_a_stub(); |
| return offset; |
| } |
| |
| // Emit deopt handler code. |
| int HandlerImpl::emit_deopt_handler(CodeBuffer& cbuf) |
| { |
| // Note that the code buffer's insts_mark is always relative to insts. |
| // That's why we must use the macroassembler to generate a handler. |
| MacroAssembler _masm(&cbuf); |
| address base = __ start_a_stub(size_deopt_handler()); |
| if (base == NULL) { |
| ciEnv::current()->record_failure("CodeCache is full"); |
| return 0; // CodeBuffer::expand failed |
| } |
| int offset = __ offset(); |
| |
| __ adr(lr, __ pc()); |
| __ far_jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack())); |
| |
| assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow"); |
| __ end_a_stub(); |
| return offset; |
| } |
| |
| // REQUIRED MATCHER CODE |
| |
| //============================================================================= |
| |
| const bool Matcher::match_rule_supported(int opcode) { |
| |
| switch (opcode) { |
| default: |
| break; |
| } |
| |
| if (!has_match_rule(opcode)) { |
| return false; |
| } |
| |
| return true; // Per default match rules are supported. |
| } |
| |
| const bool Matcher::match_rule_supported_vector(int opcode, int vlen) { |
| |
| // TODO |
| // identify extra cases that we might want to provide match rules for |
| // e.g. Op_ vector nodes and other intrinsics while guarding with vlen |
| bool ret_value = match_rule_supported(opcode); |
| // Add rules here. |
| |
| return ret_value; // Per default match rules are supported. |
| } |
| |
| const bool Matcher::has_predicated_vectors(void) { |
| return false; |
| } |
| |
| const int Matcher::float_pressure(int default_pressure_threshold) { |
| return default_pressure_threshold; |
| } |
| |
| int Matcher::regnum_to_fpu_offset(int regnum) |
| { |
| Unimplemented(); |
| return 0; |
| } |
| |
| // Is this branch offset short enough that a short branch can be used? |
| // |
| // NOTE: If the platform does not provide any short branch variants, then |
| // this method should return false for offset 0. |
| bool Matcher::is_short_branch_offset(int rule, int br_size, int offset) { |
| // The passed offset is relative to address of the branch. |
| |
| return (-32768 <= offset && offset < 32768); |
| } |
| |
| const bool Matcher::isSimpleConstant64(jlong value) { |
| // Will one (StoreL ConL) be cheaper than two (StoreI ConI)?. |
| // Probably always true, even if a temp register is required. |
| return true; |
| } |
| |
| // true just means we have fast l2f conversion |
| const bool Matcher::convL2FSupported(void) { |
| return true; |
| } |
| |
| // Vector width in bytes. |
| const int Matcher::vector_width_in_bytes(BasicType bt) { |
| int size = MIN2(16,(int)MaxVectorSize); |
| // Minimum 2 values in vector |
| if (size < 2*type2aelembytes(bt)) size = 0; |
| // But never < 4 |
| if (size < 4) size = 0; |
| return size; |
| } |
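| |
| // e.g. with MaxVectorSize = 16 this yields 16 bytes for any element |
| // type, while MaxVectorSize = 2 yields 0 for every type, since a |
| // vector must hold at least 2 elements and be at least 4 bytes wide. |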
| |
| // Limits on vector size (number of elements) loaded into vector. |
| const int Matcher::max_vector_size(const BasicType bt) { |
| return vector_width_in_bytes(bt)/type2aelembytes(bt); |
| } |
| const int Matcher::min_vector_size(const BasicType bt) { |
| // For the moment limit the vector size to 8 bytes |
| int size = 8 / type2aelembytes(bt); |
| if (size < 2) size = 2; |
| return size; |
| } |
| |
| // Vector ideal reg. |
| const uint Matcher::vector_ideal_reg(int len) { |
| switch(len) { |
| case 8: return Op_VecD; |
| case 16: return Op_VecX; |
| } |
| ShouldNotReachHere(); |
| return 0; |
| } |
| |
| const uint Matcher::vector_shift_count_ideal_reg(int size) { |
| return Op_VecX; |
| } |
| |
| // AES support not yet implemented |
| const bool Matcher::pass_original_key_for_aes() { |
| return false; |
| } |
| |
| // aarch64 supports misaligned vectors store/load. |
| const bool Matcher::misaligned_vectors_ok() { |
| return !AlignVector; // can be changed by flag |
| } |
| |
| // false => size gets scaled to BytesPerLong, ok. |
| const bool Matcher::init_array_count_is_in_bytes = false; |
| |
| // Use conditional move (CMOVL) |
| const int Matcher::long_cmove_cost() { |
| // long cmoves are no more expensive than int cmoves |
| return 0; |
| } |
| |
| const int Matcher::float_cmove_cost() { |
| // float cmoves are no more expensive than int cmoves |
| return 0; |
| } |
| |
| // Does the CPU require late expand (see block.cpp for description of late expand)? |
| const bool Matcher::require_postalloc_expand = false; |
| |
| // Do we need to mask the count passed to shift instructions or does |
| // the cpu only look at the lower 5/6 bits anyway? |
| const bool Matcher::need_masked_shift_count = false; |
| |
| // This affects two different things: |
| // - how Decode nodes are matched |
| // - how ImplicitNullCheck opportunities are recognized |
| // If true, the matcher will try to remove all Decodes and match them |
| // (as operands) into nodes. NullChecks are not prepared to deal with |
| // Decodes by final_graph_reshaping(). |
| // If false, final_graph_reshaping() forces the decode behind the Cmp |
| // for a NullCheck. The matcher matches the Decode node into a register. |
| // Implicit_null_check optimization moves the Decode along with the |
| // memory operation back up before the NullCheck. |
| bool Matcher::narrow_oop_use_complex_address() { |
| return Universe::narrow_oop_shift() == 0; |
| } |
| |
| bool Matcher::narrow_klass_use_complex_address() { |
| // TODO |
| // decide whether we need to set this to true |
| return false; |
| } |
| |
| bool Matcher::const_oop_prefer_decode() { |
| // Prefer ConN+DecodeN over ConP in simple compressed oops mode. |
| return Universe::narrow_oop_base() == NULL; |
| } |
| |
| bool Matcher::const_klass_prefer_decode() { |
| // Prefer ConNKlass+DecodeNKlass over ConP in simple compressed klass mode. |
| return Universe::narrow_klass_base() == NULL; |
| } |
| |
| // Is it better to copy float constants, or load them directly from |
| // memory? Intel can load a float constant from a direct address, |
| // requiring no extra registers. Most RISCs will have to materialize |
| // an address into a register first, so they would do better to copy |
| // the constant from stack. |
| const bool Matcher::rematerialize_float_constants = false; |
| |
| // If CPU can load and store mis-aligned doubles directly then no |
| // fixup is needed. Else we split the double into 2 integer pieces |
| // and move it piece-by-piece. Only happens when passing doubles into |
| // C code as the Java calling convention forces doubles to be aligned. |
| const bool Matcher::misaligned_doubles_ok = true; |
| |
| // Not used on AArch64; hence the Unimplemented(). |
| void Matcher::pd_implicit_null_fixup(MachNode *node, uint idx) { |
| Unimplemented(); |
| } |
| |
| // Advertise here if the CPU requires explicit rounding operations to |
| // implement the UseStrictFP mode. |
| const bool Matcher::strict_fp_requires_explicit_rounding = false; |
| |
| // Are floats converted to double when stored to stack during |
| // deoptimization? |
| bool Matcher::float_in_double() { return true; } |
| |
| // Do ints take an entire long register or just half? |
| // The relevant question is how the int is callee-saved: |
| // the whole long is written but de-opt'ing will have to extract |
| // the relevant 32 bits. |
| const bool Matcher::int_in_long = true; |
| |
| // Return whether or not this register is ever used as an argument. |
| // This function is used on startup to build the trampoline stubs in |
| // generateOptoStub. Registers not mentioned will be killed by the VM |
| // call in the trampoline, and arguments in those registers will not be |
| // available to the callee. |
| bool Matcher::can_be_java_arg(int reg) |
| { |
| return |
| reg == R0_num || reg == R0_H_num || |
| reg == R1_num || reg == R1_H_num || |
| reg == R2_num || reg == R2_H_num || |
| reg == R3_num || reg == R3_H_num || |
| reg == R4_num || reg == R4_H_num || |
| reg == R5_num || reg == R5_H_num || |
| reg == R6_num || reg == R6_H_num || |
| reg == R7_num || reg == R7_H_num || |
| reg == V0_num || reg == V0_H_num || |
| reg == V1_num || reg == V1_H_num || |
| reg == V2_num || reg == V2_H_num || |
| reg == V3_num || reg == V3_H_num || |
| reg == V4_num || reg == V4_H_num || |
| reg == V5_num || reg == V5_H_num || |
| reg == V6_num || reg == V6_H_num || |
| reg == V7_num || reg == V7_H_num; |
| } |
| |
| bool Matcher::is_spillable_arg(int reg) |
| { |
| return can_be_java_arg(reg); |
| } |
| |
| bool Matcher::use_asm_for_ldiv_by_con(jlong divisor) { |
| return false; |
| } |
| |
| RegMask Matcher::divI_proj_mask() { |
| ShouldNotReachHere(); |
| return RegMask(); |
| } |
| |
| // Register for MODI projection of divmodI. |
| RegMask Matcher::modI_proj_mask() { |
| ShouldNotReachHere(); |
| return RegMask(); |
| } |
| |
| // Register for DIVL projection of divmodL. |
| RegMask Matcher::divL_proj_mask() { |
| ShouldNotReachHere(); |
| return RegMask(); |
| } |
| |
| // Register for MODL projection of divmodL. |
| RegMask Matcher::modL_proj_mask() { |
| ShouldNotReachHere(); |
| return RegMask(); |
| } |
| |
| const RegMask Matcher::method_handle_invoke_SP_save_mask() { |
| return FP_REG_mask(); |
| } |
| |
| bool size_fits_all_mem_uses(AddPNode* addp, int shift) { |
| for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { |
| Node* u = addp->fast_out(i); |
| if (u->is_Mem()) { |
| int opsize = u->as_Mem()->memory_size(); |
| assert(opsize > 0, "unexpected memory operand size"); |
|       if (opsize != (1 << shift)) { |
| return false; |
| } |
| } |
| } |
| return true; |
| } |
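| |
| // Illustrative example (hypothetical graph): for an AddP whose offset |
| // is (LShiftL index 3), a single 8-byte ldr user matches the scale and |
| // we return true; if the same address also fed a 4-byte ldrw, the |
| // shift would no longer fit every use and we would return false. |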
| |
| const bool Matcher::convi2l_type_required = false; |
| |
| // Should the Matcher clone shifts on addressing modes, expecting them |
| // to be subsumed into complex addressing expressions or compute them |
| // into registers? |
| bool Matcher::clone_address_expressions(AddPNode* m, Matcher::MStack& mstack, VectorSet& address_visited) { |
| if (clone_base_plus_offset_address(m, mstack, address_visited)) { |
| return true; |
| } |
| |
| Node *off = m->in(AddPNode::Offset); |
| if (off->Opcode() == Op_LShiftL && off->in(2)->is_Con() && |
| size_fits_all_mem_uses(m, off->in(2)->get_int()) && |
| // Are there other uses besides address expressions? |
| !is_visited(off)) { |
| address_visited.set(off->_idx); // Flag as address_visited |
| mstack.push(off->in(2), Visit); |
| Node *conv = off->in(1); |
| if (conv->Opcode() == Op_ConvI2L && |
| // Are there other uses besides address expressions? |
| !is_visited(conv)) { |
| address_visited.set(conv->_idx); // Flag as address_visited |
| mstack.push(conv->in(1), Pre_Visit); |
| } else { |
| mstack.push(conv, Pre_Visit); |
| } |
| address_visited.test_set(m->_idx); // Flag as address_visited |
| mstack.push(m->in(AddPNode::Address), Pre_Visit); |
| mstack.push(m->in(AddPNode::Base), Pre_Visit); |
| return true; |
| } else if (off->Opcode() == Op_ConvI2L && |
| // Are there other uses besides address expressions? |
| !is_visited(off)) { |
| address_visited.test_set(m->_idx); // Flag as address_visited |
| address_visited.set(off->_idx); // Flag as address_visited |
| mstack.push(off->in(1), Pre_Visit); |
| mstack.push(m->in(AddPNode::Address), Pre_Visit); |
| mstack.push(m->in(AddPNode::Base), Pre_Visit); |
| return true; |
| } |
| return false; |
| } |
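| |
| // The two shapes cloned above correspond, roughly, to addressing modes |
| // of the form (sketch only; the exact instruction depends on the |
| // matched memory rule): |
| //   (AddP base addr (LShiftL (ConvI2L idx) con)) |
| //     ==> ldr Rt, [Rbase, Widx, sxtw #con] |
| //   (AddP base addr (ConvI2L idx)) |
| //     ==> ldr Rt, [Rbase, Widx, sxtw] |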
| |
| // Transform: |
| // (AddP base (AddP base address (LShiftL index con)) offset) |
| // into: |
| // (AddP base (AddP base offset) (LShiftL index con)) |
| // to take full advantage of ARM's addressing modes |
| void Compile::reshape_address(AddPNode* addp) { |
| Node *addr = addp->in(AddPNode::Address); |
| if (addr->is_AddP() && addr->in(AddPNode::Base) == addp->in(AddPNode::Base)) { |
| const AddPNode *addp2 = addr->as_AddP(); |
| if ((addp2->in(AddPNode::Offset)->Opcode() == Op_LShiftL && |
| addp2->in(AddPNode::Offset)->in(2)->is_Con() && |
| size_fits_all_mem_uses(addp, addp2->in(AddPNode::Offset)->in(2)->get_int())) || |
| addp2->in(AddPNode::Offset)->Opcode() == Op_ConvI2L) { |
| |
| // Any use that can't embed the address computation? |
| for (DUIterator_Fast imax, i = addp->fast_outs(imax); i < imax; i++) { |
| Node* u = addp->fast_out(i); |
| if (!u->is_Mem()) { |
| return; |
| } |
| if (u->is_LoadVector() || u->is_StoreVector() || u->Opcode() == Op_StoreCM) { |
| return; |
| } |
| if (addp2->in(AddPNode::Offset)->Opcode() != Op_ConvI2L) { |
| int scale = 1 << addp2->in(AddPNode::Offset)->in(2)->get_int(); |
| if (VM_Version::expensive_load(u->as_Mem()->memory_size(), scale)) { |
| return; |
| } |
| } |
| } |
| |
| Node* off = addp->in(AddPNode::Offset); |
| Node* addr2 = addp2->in(AddPNode::Address); |
| Node* base = addp->in(AddPNode::Base); |
| |
| Node* new_addr = NULL; |
| // Check whether the graph already has the new AddP we need |
| // before we create one (no GVN available here). |
| for (DUIterator_Fast imax, i = addr2->fast_outs(imax); i < imax; i++) { |
| Node* u = addr2->fast_out(i); |
| if (u->is_AddP() && |
| u->in(AddPNode::Base) == base && |
| u->in(AddPNode::Address) == addr2 && |
| u->in(AddPNode::Offset) == off) { |
| new_addr = u; |
| break; |
| } |
| } |
| |
| if (new_addr == NULL) { |
| new_addr = new AddPNode(base, addr2, off); |
| } |
| Node* new_off = addp2->in(AddPNode::Offset); |
| addp->set_req(AddPNode::Address, new_addr); |
| if (addr->outcnt() == 0) { |
| addr->disconnect_inputs(NULL, this); |
| } |
| addp->set_req(AddPNode::Offset, new_off); |
| if (off->outcnt() == 0) { |
| off->disconnect_inputs(NULL, this); |
| } |
| } |
| } |
| } |
| |
| // Helper for encoding java_to_runtime calls on the simulator. |
| // |
| // This is needed to compute the extra arguments required when |
| // planting a call to the simulator blrt instruction. The TypeFunc |
| // can be queried to identify the counts of integral and floating |
| // arguments and the return type. |
| |
| static void getCallInfo(const TypeFunc *tf, int &gpcnt, int &fpcnt, int &rtype) |
| { |
| int gps = 0; |
| int fps = 0; |
| const TypeTuple *domain = tf->domain(); |
| int max = domain->cnt(); |
| for (int i = TypeFunc::Parms; i < max; i++) { |
| const Type *t = domain->field_at(i); |
|     // Count each argument in exactly one bucket. |
|     switch(t->basic_type()) { |
|     case T_FLOAT: |
|     case T_DOUBLE: |
|       fps++; |
|       break; |
|     default: |
|       gps++; |
|       break; |
|     } |
| } |
| gpcnt = gps; |
| fpcnt = fps; |
| BasicType rt = tf->return_type(); |
|   switch (rt) { |
|   case T_VOID: |
|     rtype = MacroAssembler::ret_type_void; |
|     break; |
|   case T_FLOAT: |
|     rtype = MacroAssembler::ret_type_float; |
|     break; |
|   case T_DOUBLE: |
|     rtype = MacroAssembler::ret_type_double; |
|     break; |
|   default: |
|     rtype = MacroAssembler::ret_type_integral; |
|     break; |
|   } |
| } |
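| |
| // For example (illustrative, with the disjoint counting above): a |
| // TypeFunc with domain (int, float, double) yields gpcnt == 1 and |
| // fpcnt == 2, and a T_DOUBLE return type maps to |
| // MacroAssembler::ret_type_double. |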
| |
| #define MOV_VOLATILE(REG, BASE, INDEX, SCALE, DISP, SCRATCH, INSN) \ |
| MacroAssembler _masm(&cbuf); \ |
| { \ |
| guarantee(INDEX == -1, "mode not permitted for volatile"); \ |
| guarantee(DISP == 0, "mode not permitted for volatile"); \ |
| guarantee(SCALE == 0, "mode not permitted for volatile"); \ |
| __ INSN(REG, as_Register(BASE)); \ |
| } |
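| |
| // So, as an illustrative expansion, |
| //   MOV_VOLATILE(r0, base, -1, 0, 0, rscratch1, stlrb) |
| // emits a single "stlrb w0, [Xbase]"; the guarantees above enforce |
| // the base-register-only addressing that the acquire/release |
| // instructions require. |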
| |
| typedef void (MacroAssembler::* mem_insn)(Register Rt, const Address &adr); |
| typedef void (MacroAssembler::* mem_float_insn)(FloatRegister Rt, const Address &adr); |
| typedef void (MacroAssembler::* mem_vector_insn)(FloatRegister Rt, |
| MacroAssembler::SIMD_RegVariant T, const Address &adr); |
| |
| // Used for all non-volatile memory accesses. The use of |
| // $mem->opcode() to discover whether this pattern uses sign-extended |
| // offsets is something of a kludge. |
| static void loadStore(MacroAssembler masm, mem_insn insn, |
| Register reg, int opcode, |
| Register base, int index, int size, int disp) |
| { |
| Address::extend scale; |
| |
| // Hooboy, this is fugly. We need a way to communicate to the |
| // encoder that the index needs to be sign extended, so we have to |
| // enumerate all the cases. |
| switch (opcode) { |
| case INDINDEXSCALEDI2L: |
| case INDINDEXSCALEDI2LN: |
| case INDINDEXI2L: |
| case INDINDEXI2LN: |
| scale = Address::sxtw(size); |
| break; |
| default: |
| scale = Address::lsl(size); |
| } |
| |
| if (index == -1) { |
| (masm.*insn)(reg, Address(base, disp)); |
| } else { |
| assert(disp == 0, "unsupported address mode: disp = %d", disp); |
| (masm.*insn)(reg, Address(base, as_Register(index), scale)); |
| } |
| } |
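| |
| // As a sketch of the dispatch above: a load matched with an |
| // INDINDEXSCALEDI2L operand arrives here with a 32-bit index that must |
| // be sign extended, so the emitted form is roughly |
| //   ldr Rt, [Rbase, Windex, sxtw #size] |
| // while the default branch emits the plain lsl-scaled variant. |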
| |
| static void loadStore(MacroAssembler masm, mem_float_insn insn, |
| FloatRegister reg, int opcode, |
| Register base, int index, int size, int disp) |
| { |
| Address::extend scale; |
| |
| switch (opcode) { |
| case INDINDEXSCALEDI2L: |
| case INDINDEXSCALEDI2LN: |
| scale = Address::sxtw(size); |
| break; |
| default: |
| scale = Address::lsl(size); |
| } |
| |
| if (index == -1) { |
| (masm.*insn)(reg, Address(base, disp)); |
| } else { |
| assert(disp == 0, "unsupported address mode: disp = %d", disp); |
| (masm.*insn)(reg, Address(base, as_Register(index), scale)); |
| } |
| } |
| |
| static void loadStore(MacroAssembler masm, mem_vector_insn insn, |
| FloatRegister reg, MacroAssembler::SIMD_RegVariant T, |
| int opcode, Register base, int index, int size, int disp) |
| { |
| if (index == -1) { |
| (masm.*insn)(reg, T, Address(base, disp)); |
| } else { |
| assert(disp == 0, "unsupported address mode"); |
| (masm.*insn)(reg, T, Address(base, as_Register(index), Address::lsl(size))); |
| } |
| } |
| |
| %} |
| |
| |
| |
| //----------ENCODING BLOCK----------------------------------------------------- |
| // This block specifies the encoding classes used by the compiler to |
| // output byte streams. Encoding classes are parameterized macros |
| // used by Machine Instruction Nodes in order to generate the bit |
| // encoding of the instruction. Operands specify their base encoding |
| // interface with the interface keyword. Four interfaces are |
| // currently supported: REG_INTER, CONST_INTER, MEMORY_INTER, and |
| // COND_INTER. REG_INTER causes an operand to generate a function |
| // which returns its register number when queried. CONST_INTER causes |
| // an operand to generate a function which returns the value of the |
| // constant when queried. MEMORY_INTER causes an operand to generate |
| // four functions which return the Base Register, the Index Register, |
| // the Scale Value, and the Offset Value of the operand when queried. |
| // COND_INTER causes an operand to generate six functions which return |
| // the encoding code (ie - encoding bits for the instruction) |
| // associated with each basic boolean condition for a conditional |
| // instruction. |
| // |
| // Instructions specify two basic values for encoding. Again, a |
| // function is available to check if the constant displacement is an |
| // oop. They use the ins_encode keyword to specify their encoding |
| // classes (which must be a sequence of enc_class names, and their |
| // parameters, specified in the encoding block), and they use the |
| // opcode keyword to specify, in order, their primary, secondary, and |
| // tertiary opcode. Only the opcode sections which a particular |
| // instruction needs for encoding need to be specified. |
| encode %{ |
| // Build emit functions for each basic byte or larger field in the |
| // intel encoding scheme (opcode, rm, sib, immediate), and call them |
| // from C++ code in the enc_class source block. Emit functions will |
| // live in the main source block for now. In future, we can |
| // generalize this by adding a syntax that specifies the sizes of |
| // fields in an order, so that the adlc can build the emit functions |
| // automagically |
| |
| // catch all for unimplemented encodings |
| enc_class enc_unimplemented %{ |
| MacroAssembler _masm(&cbuf); |
| __ unimplemented("C2 catch all"); |
| %} |
| |
| // BEGIN Non-volatile memory access |
| |
| enc_class aarch64_enc_ldrsbw(iRegI dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsbw, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrsb(iRegI dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsb, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrb(iRegI dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrb(iRegL dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrb, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrshw(iRegI dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrshw, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrsh(iRegI dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsh, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrh(iRegI dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrh(iRegL dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrh, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrw(iRegI dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrw(iRegL dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrw, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrsw(iRegL dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrsw, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldr(iRegL dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrs(vRegF dst, memory mem) %{ |
| FloatRegister dst_reg = as_FloatRegister($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrd(vRegD dst, memory mem) %{ |
| FloatRegister dst_reg = as_FloatRegister($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, dst_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrvS(vecD dst, memory mem) %{ |
| FloatRegister dst_reg = as_FloatRegister($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::S, |
| $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrvD(vecD dst, memory mem) %{ |
| FloatRegister dst_reg = as_FloatRegister($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::D, |
| $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_ldrvQ(vecX dst, memory mem) %{ |
| FloatRegister dst_reg = as_FloatRegister($dst$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldr, dst_reg, MacroAssembler::Q, |
| $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
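| |
|   // Note: the S/D/Q variants above select the SIMD access width |
|   // (4, 8 and 16 bytes respectively), matching the vecD/vecX |
|   // operands of the corresponding load rules. |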
| |
| enc_class aarch64_enc_strb(iRegI src, memory mem) %{ |
| Register src_reg = as_Register($src$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::strb, src_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_strb0(memory mem) %{ |
| MacroAssembler _masm(&cbuf); |
| loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_strb0_ordered(memory mem) %{ |
| MacroAssembler _masm(&cbuf); |
| __ membar(Assembler::StoreStore); |
| loadStore(_masm, &MacroAssembler::strb, zr, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_strh(iRegI src, memory mem) %{ |
| Register src_reg = as_Register($src$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::strh, src_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_strh0(memory mem) %{ |
| MacroAssembler _masm(&cbuf); |
| loadStore(_masm, &MacroAssembler::strh, zr, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_strw(iRegI src, memory mem) %{ |
| Register src_reg = as_Register($src$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::strw, src_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_strw0(memory mem) %{ |
| MacroAssembler _masm(&cbuf); |
| loadStore(_masm, &MacroAssembler::strw, zr, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_str(iRegL src, memory mem) %{ |
| Register src_reg = as_Register($src$$reg); |
| // we sometimes get asked to store the stack pointer into the |
| // current thread -- we cannot do that directly on AArch64 |
| if (src_reg == r31_sp) { |
| MacroAssembler _masm(&cbuf); |
| assert(as_Register($mem$$base) == rthread, "unexpected store for sp"); |
| __ mov(rscratch2, sp); |
| src_reg = rscratch2; |
| } |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_str0(memory mem) %{ |
| MacroAssembler _masm(&cbuf); |
| loadStore(_masm, &MacroAssembler::str, zr, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_strs(vRegF src, memory mem) %{ |
| FloatRegister src_reg = as_FloatRegister($src$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::strs, src_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_strd(vRegD src, memory mem) %{ |
| FloatRegister src_reg = as_FloatRegister($src$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::strd, src_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_strvS(vecD src, memory mem) %{ |
| FloatRegister src_reg = as_FloatRegister($src$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::S, |
| $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_strvD(vecD src, memory mem) %{ |
| FloatRegister src_reg = as_FloatRegister($src$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::D, |
| $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| enc_class aarch64_enc_strvQ(vecX src, memory mem) %{ |
| FloatRegister src_reg = as_FloatRegister($src$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::str, src_reg, MacroAssembler::Q, |
| $mem->opcode(), as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| %} |
| |
| // END Non-volatile memory access |
| |
| // volatile loads and stores |
| |
| enc_class aarch64_enc_stlrb(iRegI src, memory mem) %{ |
| MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, stlrb); |
| %} |
| |
| enc_class aarch64_enc_stlrh(iRegI src, memory mem) %{ |
| MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, stlrh); |
| %} |
| |
| enc_class aarch64_enc_stlrw(iRegI src, memory mem) %{ |
| MOV_VOLATILE(as_Register($src$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, stlrw); |
| %} |
| |
| |
| enc_class aarch64_enc_ldarsbw(iRegI dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldarb); |
| __ sxtbw(dst_reg, dst_reg); |
| %} |
| |
| enc_class aarch64_enc_ldarsb(iRegL dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldarb); |
| __ sxtb(dst_reg, dst_reg); |
| %} |
| |
| enc_class aarch64_enc_ldarbw(iRegI dst, memory mem) %{ |
| MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldarb); |
| %} |
| |
| enc_class aarch64_enc_ldarb(iRegL dst, memory mem) %{ |
| MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldarb); |
| %} |
| |
| enc_class aarch64_enc_ldarshw(iRegI dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldarh); |
| __ sxthw(dst_reg, dst_reg); |
| %} |
| |
| enc_class aarch64_enc_ldarsh(iRegL dst, memory mem) %{ |
| Register dst_reg = as_Register($dst$$reg); |
| MOV_VOLATILE(dst_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldarh); |
| __ sxth(dst_reg, dst_reg); |
| %} |
| |
| enc_class aarch64_enc_ldarhw(iRegI dst, memory mem) %{ |
| MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldarh); |
| %} |
| |
| enc_class aarch64_enc_ldarh(iRegL dst, memory mem) %{ |
| MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldarh); |
| %} |
| |
| enc_class aarch64_enc_ldarw(iRegI dst, memory mem) %{ |
| MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldarw); |
| %} |
| |
| enc_class aarch64_enc_ldarw(iRegL dst, memory mem) %{ |
| MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldarw); |
| %} |
| |
| enc_class aarch64_enc_ldar(iRegL dst, memory mem) %{ |
| MOV_VOLATILE(as_Register($dst$$reg), $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldar); |
| %} |
| |
| enc_class aarch64_enc_fldars(vRegF dst, memory mem) %{ |
| MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldarw); |
| __ fmovs(as_FloatRegister($dst$$reg), rscratch1); |
| %} |
| |
| enc_class aarch64_enc_fldard(vRegD dst, memory mem) %{ |
| MOV_VOLATILE(rscratch1, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, ldar); |
| __ fmovd(as_FloatRegister($dst$$reg), rscratch1); |
| %} |
| |
| enc_class aarch64_enc_stlr(iRegL src, memory mem) %{ |
| Register src_reg = as_Register($src$$reg); |
| // we sometimes get asked to store the stack pointer into the |
| // current thread -- we cannot do that directly on AArch64 |
| if (src_reg == r31_sp) { |
| MacroAssembler _masm(&cbuf); |
| assert(as_Register($mem$$base) == rthread, "unexpected store for sp"); |
| __ mov(rscratch2, sp); |
| src_reg = rscratch2; |
| } |
| MOV_VOLATILE(src_reg, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, stlr); |
| %} |
| |
| enc_class aarch64_enc_fstlrs(vRegF src, memory mem) %{ |
| { |
| MacroAssembler _masm(&cbuf); |
| FloatRegister src_reg = as_FloatRegister($src$$reg); |
| __ fmovs(rscratch2, src_reg); |
| } |
| MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, stlrw); |
| %} |
| |
| enc_class aarch64_enc_fstlrd(vRegD src, memory mem) %{ |
| { |
| MacroAssembler _masm(&cbuf); |
| FloatRegister src_reg = as_FloatRegister($src$$reg); |
| __ fmovd(rscratch2, src_reg); |
| } |
| MOV_VOLATILE(rscratch2, $mem$$base, $mem$$index, $mem$$scale, $mem$$disp, |
| rscratch1, stlr); |
| %} |
| |
| // synchronized read/update encodings |
| |
| enc_class aarch64_enc_ldaxr(iRegL dst, memory mem) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| Register base = as_Register($mem$$base); |
| int index = $mem$$index; |
| int scale = $mem$$scale; |
| int disp = $mem$$disp; |
| if (index == -1) { |
| if (disp != 0) { |
| __ lea(rscratch1, Address(base, disp)); |
| __ ldaxr(dst_reg, rscratch1); |
| } else { |
| // TODO |
| // should we ever get anything other than this case? |
| __ ldaxr(dst_reg, base); |
| } |
| } else { |
| Register index_reg = as_Register(index); |
| if (disp == 0) { |
| __ lea(rscratch1, Address(base, index_reg, Address::lsl(scale))); |
| __ ldaxr(dst_reg, rscratch1); |
| } else { |
| __ lea(rscratch1, Address(base, disp)); |
| __ lea(rscratch1, Address(rscratch1, index_reg, Address::lsl(scale))); |
| __ ldaxr(dst_reg, rscratch1); |
| } |
| } |
| %} |
| |
| enc_class aarch64_enc_stlxr(iRegLNoSp src, memory mem) %{ |
| MacroAssembler _masm(&cbuf); |
| Register src_reg = as_Register($src$$reg); |
| Register base = as_Register($mem$$base); |
| int index = $mem$$index; |
| int scale = $mem$$scale; |
| int disp = $mem$$disp; |
| if (index == -1) { |
| if (disp != 0) { |
| __ lea(rscratch2, Address(base, disp)); |
| __ stlxr(rscratch1, src_reg, rscratch2); |
| } else { |
| // TODO |
| // should we ever get anything other than this case? |
| __ stlxr(rscratch1, src_reg, base); |
| } |
| } else { |
| Register index_reg = as_Register(index); |
| if (disp == 0) { |
| __ lea(rscratch2, Address(base, index_reg, Address::lsl(scale))); |
| __ stlxr(rscratch1, src_reg, rscratch2); |
| } else { |
| __ lea(rscratch2, Address(base, disp)); |
| __ lea(rscratch2, Address(rscratch2, index_reg, Address::lsl(scale))); |
| __ stlxr(rscratch1, src_reg, rscratch2); |
| } |
| } |
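|     // stlxr writes 0 to rscratch1 on success, so the compare below |
|     // leaves EQ set exactly when the store-exclusive succeeded, which |
|     // is what the flag-consuming match rules expect. |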
| __ cmpw(rscratch1, zr); |
| %} |
| |
| enc_class aarch64_enc_cmpxchg(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{ |
| MacroAssembler _masm(&cbuf); |
| guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); |
| __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::xword, /*acquire*/ false, /*release*/ true, |
| /*weak*/ false, noreg); |
| %} |
| |
| enc_class aarch64_enc_cmpxchgw(memory mem, iRegINoSp oldval, iRegINoSp newval) %{ |
| MacroAssembler _masm(&cbuf); |
| guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); |
| __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::word, /*acquire*/ false, /*release*/ true, |
| /*weak*/ false, noreg); |
| %} |
| |
| enc_class aarch64_enc_cmpxchgs(memory mem, iRegINoSp oldval, iRegINoSp newval) %{ |
| MacroAssembler _masm(&cbuf); |
| guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); |
| __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::halfword, /*acquire*/ false, /*release*/ true, |
| /*weak*/ false, noreg); |
| %} |
| |
| enc_class aarch64_enc_cmpxchgb(memory mem, iRegINoSp oldval, iRegINoSp newval) %{ |
| MacroAssembler _masm(&cbuf); |
| guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); |
| __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::byte, /*acquire*/ false, /*release*/ true, |
| /*weak*/ false, noreg); |
| %} |
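| |
|   // For reference, when LSE is not available MacroAssembler::cmpxchg |
|   // expands to a load-exclusive/store-exclusive retry loop, roughly |
|   // (sketch only; the real expansion lives in the macro assembler): |
|   // |
|   //   retry: |
|   //     ldxr    tmp, [addr]        (ldaxr for the _acq variants) |
|   //     cmp     tmp, oldval |
|   //     b.ne    done |
|   //     stlxr   status, newval, [addr] |
|   //     cbnz    status, retry |
|   //   done: |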
| |
| |
| // The only difference between aarch64_enc_cmpxchg and |
| // aarch64_enc_cmpxchg_acq is that we use load-acquire in the |
| // CompareAndSwap sequence to serve as a barrier on acquiring a |
| // lock. |
| enc_class aarch64_enc_cmpxchg_acq(memory mem, iRegLNoSp oldval, iRegLNoSp newval) %{ |
| MacroAssembler _masm(&cbuf); |
| guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); |
| __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::xword, /*acquire*/ true, /*release*/ true, |
| /*weak*/ false, noreg); |
| %} |
| |
| enc_class aarch64_enc_cmpxchgw_acq(memory mem, iRegINoSp oldval, iRegINoSp newval) %{ |
| MacroAssembler _masm(&cbuf); |
| guarantee($mem$$index == -1 && $mem$$disp == 0, "impossible encoding"); |
| __ cmpxchg($mem$$base$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::word, /*acquire*/ true, /*release*/ true, |
| /*weak*/ false, noreg); |
| %} |
| |
| |
| // auxiliary used for CompareAndSwapX to set result register |
| enc_class aarch64_enc_cset_eq(iRegINoSp res) %{ |
| MacroAssembler _masm(&cbuf); |
| Register res_reg = as_Register($res$$reg); |
| __ cset(res_reg, Assembler::EQ); |
| %} |
| |
| // prefetch encodings |
| |
| enc_class aarch64_enc_prefetchw(memory mem) %{ |
| MacroAssembler _masm(&cbuf); |
| Register base = as_Register($mem$$base); |
| int index = $mem$$index; |
| int scale = $mem$$scale; |
| int disp = $mem$$disp; |
| if (index == -1) { |
| __ prfm(Address(base, disp), PSTL1KEEP); |
| } else { |
| Register index_reg = as_Register(index); |
| if (disp == 0) { |
| __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1KEEP); |
| } else { |
| __ lea(rscratch1, Address(base, disp)); |
| __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1KEEP); |
| } |
| } |
| %} |
| |
|   // mov encodings |
| |
| enc_class aarch64_enc_movw_imm(iRegI dst, immI src) %{ |
| MacroAssembler _masm(&cbuf); |
| u_int32_t con = (u_int32_t)$src$$constant; |
| Register dst_reg = as_Register($dst$$reg); |
| if (con == 0) { |
| __ movw(dst_reg, zr); |
| } else { |
| __ movw(dst_reg, con); |
| } |
| %} |
| |
| enc_class aarch64_enc_mov_imm(iRegL dst, immL src) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| u_int64_t con = (u_int64_t)$src$$constant; |
| if (con == 0) { |
| __ mov(dst_reg, zr); |
| } else { |
| __ mov(dst_reg, con); |
| } |
| %} |
| |
| enc_class aarch64_enc_mov_p(iRegP dst, immP src) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| address con = (address)$src$$constant; |
| if (con == NULL || con == (address)1) { |
| ShouldNotReachHere(); |
| } else { |
| relocInfo::relocType rtype = $src->constant_reloc(); |
| if (rtype == relocInfo::oop_type) { |
| __ movoop(dst_reg, (jobject)con, /*immediate*/true); |
| } else if (rtype == relocInfo::metadata_type) { |
| __ mov_metadata(dst_reg, (Metadata*)con); |
| } else { |
| assert(rtype == relocInfo::none, "unexpected reloc type"); |
| if (con < (address)(uintptr_t)os::vm_page_size()) { |
| __ mov(dst_reg, con); |
| } else { |
| unsigned long offset; |
| __ adrp(dst_reg, con, offset); |
| __ add(dst_reg, dst_reg, offset); |
| } |
| } |
| } |
| %} |
| |
| enc_class aarch64_enc_mov_p0(iRegP dst, immP0 src) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| __ mov(dst_reg, zr); |
| %} |
| |
| enc_class aarch64_enc_mov_p1(iRegP dst, immP_1 src) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| __ mov(dst_reg, (u_int64_t)1); |
| %} |
| |
| enc_class aarch64_enc_mov_poll_page(iRegP dst, immPollPage src) %{ |
| MacroAssembler _masm(&cbuf); |
| address page = (address)$src$$constant; |
| Register dst_reg = as_Register($dst$$reg); |
| unsigned long off; |
| __ adrp(dst_reg, Address(page, relocInfo::poll_type), off); |
| assert(off == 0, "assumed offset == 0"); |
| %} |
| |
| enc_class aarch64_enc_mov_byte_map_base(iRegP dst, immByteMapBase src) %{ |
| MacroAssembler _masm(&cbuf); |
| __ load_byte_map_base($dst$$Register); |
| %} |
| |
| enc_class aarch64_enc_mov_n(iRegN dst, immN src) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| address con = (address)$src$$constant; |
| if (con == NULL) { |
| ShouldNotReachHere(); |
| } else { |
| relocInfo::relocType rtype = $src->constant_reloc(); |
| assert(rtype == relocInfo::oop_type, "unexpected reloc type"); |
| __ set_narrow_oop(dst_reg, (jobject)con); |
| } |
| %} |
| |
| enc_class aarch64_enc_mov_n0(iRegN dst, immN0 src) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| __ mov(dst_reg, zr); |
| %} |
| |
| enc_class aarch64_enc_mov_nk(iRegN dst, immNKlass src) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| address con = (address)$src$$constant; |
| if (con == NULL) { |
| ShouldNotReachHere(); |
| } else { |
| relocInfo::relocType rtype = $src->constant_reloc(); |
| assert(rtype == relocInfo::metadata_type, "unexpected reloc type"); |
| __ set_narrow_klass(dst_reg, (Klass *)con); |
| } |
| %} |
| |
| // arithmetic encodings |
| |
| enc_class aarch64_enc_addsubw_imm(iRegI dst, iRegI src1, immIAddSub src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| Register src_reg = as_Register($src1$$reg); |
| int32_t con = (int32_t)$src2$$constant; |
| // add has primary == 0, subtract has primary == 1 |
| if ($primary) { con = -con; } |
| if (con < 0) { |
| __ subw(dst_reg, src_reg, -con); |
| } else { |
| __ addw(dst_reg, src_reg, con); |
| } |
| %} |
| |
| enc_class aarch64_enc_addsub_imm(iRegL dst, iRegL src1, immLAddSub src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| Register src_reg = as_Register($src1$$reg); |
| int32_t con = (int32_t)$src2$$constant; |
| // add has primary == 0, subtract has primary == 1 |
| if ($primary) { con = -con; } |
| if (con < 0) { |
| __ sub(dst_reg, src_reg, -con); |
| } else { |
| __ add(dst_reg, src_reg, con); |
| } |
| %} |
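| |
|   // Both encodings above rely on AArch64 add/sub immediates being |
|   // unsigned 12-bit values (optionally shifted), so a negative constant |
|   // is handled by emitting the opposite operation on its negation. |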
| |
| enc_class aarch64_enc_divw(iRegI dst, iRegI src1, iRegI src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| Register src1_reg = as_Register($src1$$reg); |
| Register src2_reg = as_Register($src2$$reg); |
| __ corrected_idivl(dst_reg, src1_reg, src2_reg, false, rscratch1); |
| %} |
| |
| enc_class aarch64_enc_div(iRegI dst, iRegI src1, iRegI src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| Register src1_reg = as_Register($src1$$reg); |
| Register src2_reg = as_Register($src2$$reg); |
| __ corrected_idivq(dst_reg, src1_reg, src2_reg, false, rscratch1); |
| %} |
| |
| enc_class aarch64_enc_modw(iRegI dst, iRegI src1, iRegI src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| Register src1_reg = as_Register($src1$$reg); |
| Register src2_reg = as_Register($src2$$reg); |
| __ corrected_idivl(dst_reg, src1_reg, src2_reg, true, rscratch1); |
| %} |
| |
| enc_class aarch64_enc_mod(iRegI dst, iRegI src1, iRegI src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register dst_reg = as_Register($dst$$reg); |
| Register src1_reg = as_Register($src1$$reg); |
| Register src2_reg = as_Register($src2$$reg); |
| __ corrected_idivq(dst_reg, src1_reg, src2_reg, true, rscratch1); |
| %} |
| |
| // compare instruction encodings |
| |
| enc_class aarch64_enc_cmpw(iRegI src1, iRegI src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register reg1 = as_Register($src1$$reg); |
| Register reg2 = as_Register($src2$$reg); |
| __ cmpw(reg1, reg2); |
| %} |
| |
| enc_class aarch64_enc_cmpw_imm_addsub(iRegI src1, immIAddSub src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register reg = as_Register($src1$$reg); |
| int32_t val = $src2$$constant; |
| if (val >= 0) { |
| __ subsw(zr, reg, val); |
| } else { |
| __ addsw(zr, reg, -val); |
| } |
| %} |
| |
| enc_class aarch64_enc_cmpw_imm(iRegI src1, immI src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register reg1 = as_Register($src1$$reg); |
| u_int32_t val = (u_int32_t)$src2$$constant; |
| __ movw(rscratch1, val); |
| __ cmpw(reg1, rscratch1); |
| %} |
| |
| enc_class aarch64_enc_cmp(iRegL src1, iRegL src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register reg1 = as_Register($src1$$reg); |
| Register reg2 = as_Register($src2$$reg); |
| __ cmp(reg1, reg2); |
| %} |
| |
| enc_class aarch64_enc_cmp_imm_addsub(iRegL src1, immL12 src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register reg = as_Register($src1$$reg); |
| int64_t val = $src2$$constant; |
| if (val >= 0) { |
| __ subs(zr, reg, val); |
| } else if (val != -val) { |
| __ adds(zr, reg, -val); |
| } else { |
| // aargh, Long.MIN_VALUE is a special case |
| __ orr(rscratch1, zr, (u_int64_t)val); |
| __ subs(zr, reg, rscratch1); |
| } |
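|     // Note: (val != -val) is false only for Long.MIN_VALUE, whose |
|     // negation overflows back to itself in two's complement, hence |
|     // the register-materialized compare above. |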
| %} |
| |
| enc_class aarch64_enc_cmp_imm(iRegL src1, immL src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register reg1 = as_Register($src1$$reg); |
| u_int64_t val = (u_int64_t)$src2$$constant; |
| __ mov(rscratch1, val); |
| __ cmp(reg1, rscratch1); |
| %} |
| |
| enc_class aarch64_enc_cmpp(iRegP src1, iRegP src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register reg1 = as_Register($src1$$reg); |
| Register reg2 = as_Register($src2$$reg); |
| __ cmp(reg1, reg2); |
| %} |
| |
| enc_class aarch64_enc_cmpn(iRegN src1, iRegN src2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register reg1 = as_Register($src1$$reg); |
| Register reg2 = as_Register($src2$$reg); |
| __ cmpw(reg1, reg2); |
| %} |
| |
| enc_class aarch64_enc_testp(iRegP src) %{ |
| MacroAssembler _masm(&cbuf); |
| Register reg = as_Register($src$$reg); |
| __ cmp(reg, zr); |
| %} |
| |
| enc_class aarch64_enc_testn(iRegN src) %{ |
| MacroAssembler _masm(&cbuf); |
| Register reg = as_Register($src$$reg); |
| __ cmpw(reg, zr); |
| %} |
| |
| enc_class aarch64_enc_b(label lbl) %{ |
| MacroAssembler _masm(&cbuf); |
| Label *L = $lbl$$label; |
| __ b(*L); |
| %} |
| |
| enc_class aarch64_enc_br_con(cmpOp cmp, label lbl) %{ |
| MacroAssembler _masm(&cbuf); |
| Label *L = $lbl$$label; |
| __ br ((Assembler::Condition)$cmp$$cmpcode, *L); |
| %} |
| |
| enc_class aarch64_enc_br_conU(cmpOpU cmp, label lbl) %{ |
| MacroAssembler _masm(&cbuf); |
| Label *L = $lbl$$label; |
| __ br ((Assembler::Condition)$cmp$$cmpcode, *L); |
| %} |
| |
| enc_class aarch64_enc_partial_subtype_check(iRegP sub, iRegP super, iRegP temp, iRegP result) |
| %{ |
| Register sub_reg = as_Register($sub$$reg); |
| Register super_reg = as_Register($super$$reg); |
| Register temp_reg = as_Register($temp$$reg); |
| Register result_reg = as_Register($result$$reg); |
| |
| Label miss; |
| MacroAssembler _masm(&cbuf); |
| __ check_klass_subtype_slow_path(sub_reg, super_reg, temp_reg, result_reg, |
| NULL, &miss, |
| /*set_cond_codes:*/ true); |
| if ($primary) { |
| __ mov(result_reg, zr); |
| } |
| __ bind(miss); |
| %} |
| |
| enc_class aarch64_enc_java_static_call(method meth) %{ |
| MacroAssembler _masm(&cbuf); |
| |
| address addr = (address)$meth$$method; |
| address call; |
| if (!_method) { |
| // A call to a runtime wrapper, e.g. new, new_typeArray_Java, uncommon_trap. |
| call = __ trampoline_call(Address(addr, relocInfo::runtime_call_type), &cbuf); |
| } else { |
| int method_index = resolved_method_index(cbuf); |
| RelocationHolder rspec = _optimized_virtual ? opt_virtual_call_Relocation::spec(method_index) |
| : static_call_Relocation::spec(method_index); |
| call = __ trampoline_call(Address(addr, rspec), &cbuf); |
| |
| // Emit stub for static call |
| address stub = CompiledStaticCall::emit_to_interp_stub(cbuf); |
| if (stub == NULL) { |
| ciEnv::current()->record_failure("CodeCache is full"); |
| return; |
| } |
| } |
| if (call == NULL) { |
| ciEnv::current()->record_failure("CodeCache is full"); |
| return; |
| } |
| %} |
| |
| enc_class aarch64_enc_java_dynamic_call(method meth) %{ |
| MacroAssembler _masm(&cbuf); |
| int method_index = resolved_method_index(cbuf); |
| address call = __ ic_call((address)$meth$$method, method_index); |
| if (call == NULL) { |
| ciEnv::current()->record_failure("CodeCache is full"); |
| return; |
| } |
| %} |
| |
| enc_class aarch64_enc_call_epilog() %{ |
| MacroAssembler _masm(&cbuf); |
| if (VerifyStackAtCalls) { |
| // Check that stack depth is unchanged: find majik cookie on stack |
| __ call_Unimplemented(); |
| } |
| %} |
| |
| enc_class aarch64_enc_java_to_runtime(method meth) %{ |
| MacroAssembler _masm(&cbuf); |
| |
|     // Some calls to generated routines (arraycopy code) are scheduled |
|     // by C2 as runtime calls. If so we can call them using a br (they |
|     // will be in a reachable segment); otherwise we have to use a blrt, |
|     // which loads the absolute address into a register. |
| address entry = (address)$meth$$method; |
| CodeBlob *cb = CodeCache::find_blob(entry); |
| if (cb) { |
| address call = __ trampoline_call(Address(entry, relocInfo::runtime_call_type)); |
| if (call == NULL) { |
| ciEnv::current()->record_failure("CodeCache is full"); |
| return; |
| } |
| } else { |
| int gpcnt; |
| int fpcnt; |
| int rtype; |
| getCallInfo(tf(), gpcnt, fpcnt, rtype); |
| Label retaddr; |
| __ adr(rscratch2, retaddr); |
| __ lea(rscratch1, RuntimeAddress(entry)); |
| // Leave a breadcrumb for JavaFrameAnchor::capture_last_Java_pc() |
| __ stp(zr, rscratch2, Address(__ pre(sp, -2 * wordSize))); |
| __ blrt(rscratch1, gpcnt, fpcnt, rtype); |
| __ bind(retaddr); |
| __ add(sp, sp, 2 * wordSize); |
| } |
| %} |
| |
| enc_class aarch64_enc_rethrow() %{ |
| MacroAssembler _masm(&cbuf); |
| __ far_jump(RuntimeAddress(OptoRuntime::rethrow_stub())); |
| %} |
| |
| enc_class aarch64_enc_ret() %{ |
| MacroAssembler _masm(&cbuf); |
| __ ret(lr); |
| %} |
| |
| enc_class aarch64_enc_tail_call(iRegP jump_target) %{ |
| MacroAssembler _masm(&cbuf); |
| Register target_reg = as_Register($jump_target$$reg); |
| __ br(target_reg); |
| %} |
| |
| enc_class aarch64_enc_tail_jmp(iRegP jump_target) %{ |
| MacroAssembler _masm(&cbuf); |
| Register target_reg = as_Register($jump_target$$reg); |
| // exception oop should be in r0 |
| // ret addr has been popped into lr |
| // callee expects it in r3 |
| __ mov(r3, lr); |
| __ br(target_reg); |
| %} |
| |
| enc_class aarch64_enc_fast_lock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register oop = as_Register($object$$reg); |
| Register box = as_Register($box$$reg); |
| Register disp_hdr = as_Register($tmp$$reg); |
| Register tmp = as_Register($tmp2$$reg); |
| Label cont; |
| Label object_has_monitor; |
| Label cas_failed; |
| |
| assert_different_registers(oop, box, tmp, disp_hdr); |
| |
| // Load markOop from object into displaced_header. |
| __ ldr(disp_hdr, Address(oop, oopDesc::mark_offset_in_bytes())); |
| |
| // Always do locking in runtime. |
| if (EmitSync & 0x01) { |
| __ cmp(oop, zr); |
| return; |
| } |
| |
| if (UseBiasedLocking && !UseOptoBiasInlining) { |
| __ biased_locking_enter(box, oop, disp_hdr, tmp, true, cont); |
| } |
| |
| // Handle existing monitor |
| if ((EmitSync & 0x02) == 0) { |
|       // We can use AArch64's bit test and branch here, but |
|       // markOopDesc does not define a bit index, just the bit value, |
|       // so assert in case the bit position changes. |
| # define __monitor_value_log2 1 |
| assert(markOopDesc::monitor_value == (1 << __monitor_value_log2), "incorrect bit position"); |
| __ tbnz(disp_hdr, __monitor_value_log2, object_has_monitor); |
| # undef __monitor_value_log2 |
| } |
| |
| // Set displaced_header to be (markOop of object | UNLOCK_VALUE). |
| __ orr(disp_hdr, disp_hdr, markOopDesc::unlocked_value); |
| |
| // Load Compare Value application register. |
| |
| // Initialize the box. (Must happen before we update the object mark!) |
| __ str(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes())); |
| |
| // Compare object markOop with mark and if equal exchange scratch1 |
| // with object markOop. |
| if (UseLSE) { |
| __ mov(tmp, disp_hdr); |
| __ casal(Assembler::xword, tmp, box, oop); |
| __ cmp(tmp, disp_hdr); |
| __ br(Assembler::EQ, cont); |
| } else { |
| Label retry_load; |
| if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH)) |
| __ prfm(Address(oop), PSTL1STRM); |
| __ bind(retry_load); |
| __ ldaxr(tmp, oop); |
| __ cmp(tmp, disp_hdr); |
| __ br(Assembler::NE, cas_failed); |
| // use stlxr to ensure update is immediately visible |
| __ stlxr(tmp, box, oop); |
| __ cbzw(tmp, cont); |
| __ b(retry_load); |
| } |
| |
| // Formerly: |
| // __ cmpxchgptr(/*oldv=*/disp_hdr, |
| // /*newv=*/box, |
| // /*addr=*/oop, |
| // /*tmp=*/tmp, |
| // cont, |
| // /*fail*/NULL); |
| |
| assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); |
| |
|     // If the compare-and-exchange succeeded, then we found an unlocked |
|     // object; we have now locked it and will continue at label cont. |
| |
| __ bind(cas_failed); |
| // We did not see an unlocked object so try the fast recursive case. |
| |
| // Check if the owner is self by comparing the value in the |
| // markOop of object (disp_hdr) with the stack pointer. |
| __ mov(rscratch1, sp); |
| __ sub(disp_hdr, disp_hdr, rscratch1); |
| __ mov(tmp, (address) (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place)); |
|     // If the condition is true we can store 0 as the displaced header |
|     // in the box, which indicates that it is a recursive lock. |
| __ ands(tmp/*==0?*/, disp_hdr, tmp); |
| __ str(tmp/*==0, perhaps*/, Address(box, BasicLock::displaced_header_offset_in_bytes())); |
| |
| // Handle existing monitor. |
| if ((EmitSync & 0x02) == 0) { |
| __ b(cont); |
| |
| __ bind(object_has_monitor); |
| // The object's monitor m is unlocked iff m->owner == NULL, |
| // otherwise m->owner may contain a thread or a stack address. |
| // |
| // Try to CAS m->owner from NULL to current thread. |
| __ add(tmp, disp_hdr, (ObjectMonitor::owner_offset_in_bytes()-markOopDesc::monitor_value)); |
| __ mov(disp_hdr, zr); |
| |
| if (UseLSE) { |
| __ mov(rscratch1, disp_hdr); |
| __ casal(Assembler::xword, rscratch1, rthread, tmp); |
| __ cmp(rscratch1, disp_hdr); |
| } else { |
| Label retry_load, fail; |
| if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH)) |
| __ prfm(Address(tmp), PSTL1STRM); |
| __ bind(retry_load); |
| __ ldaxr(rscratch1, tmp); |
| __ cmp(disp_hdr, rscratch1); |
| __ br(Assembler::NE, fail); |
| // use stlxr to ensure update is immediately visible |
| __ stlxr(rscratch1, rthread, tmp); |
| __ cbnzw(rscratch1, retry_load); |
| __ bind(fail); |
| } |
| |
| // Label next; |
| // __ cmpxchgptr(/*oldv=*/disp_hdr, |
| // /*newv=*/rthread, |
| // /*addr=*/tmp, |
| // /*tmp=*/rscratch1, |
| // /*succeed*/next, |
| // /*fail*/NULL); |
| // __ bind(next); |
| |
| // store a non-null value into the box. |
| __ str(box, Address(box, BasicLock::displaced_header_offset_in_bytes())); |
| |
| // PPC port checks the following invariants |
| // #ifdef ASSERT |
| // bne(flag, cont); |
| // We have acquired the monitor, check some invariants. |
| // addw(/*monitor=*/tmp, tmp, -ObjectMonitor::owner_offset_in_bytes()); |
| // Invariant 1: _recursions should be 0. |
| // assert(ObjectMonitor::recursions_size_in_bytes() == 8, "unexpected size"); |
| // assert_mem8_is_zero(ObjectMonitor::recursions_offset_in_bytes(), tmp, |
| // "monitor->_recursions should be 0", -1); |
| // Invariant 2: OwnerIsThread shouldn't be 0. |
| // assert(ObjectMonitor::OwnerIsThread_size_in_bytes() == 4, "unexpected size"); |
| //assert_mem4_isnot_zero(ObjectMonitor::OwnerIsThread_offset_in_bytes(), tmp, |
| // "monitor->OwnerIsThread shouldn't be 0", -1); |
| // #endif |
| } |
| |
| __ bind(cont); |
| // flag == EQ indicates success |
| // flag == NE indicates failure |
| |
| %} |
| |
| // TODO |
| // reimplement this with custom cmpxchgptr code |
| // which avoids some of the unnecessary branching |
| enc_class aarch64_enc_fast_unlock(iRegP object, iRegP box, iRegP tmp, iRegP tmp2) %{ |
| MacroAssembler _masm(&cbuf); |
| Register oop = as_Register($object$$reg); |
| Register box = as_Register($box$$reg); |
| Register disp_hdr = as_Register($tmp$$reg); |
| Register tmp = as_Register($tmp2$$reg); |
| Label cont; |
| Label object_has_monitor; |
| Label cas_failed; |
| |
| assert_different_registers(oop, box, tmp, disp_hdr); |
| |
| // Always do locking in runtime. |
| if (EmitSync & 0x01) { |
| __ cmp(oop, zr); // Oop can't be 0 here => always false. |
| return; |
| } |
| |
| if (UseBiasedLocking && !UseOptoBiasInlining) { |
| __ biased_locking_exit(oop, tmp, cont); |
| } |
| |
| // Find the lock address and load the displaced header from the stack. |
| __ ldr(disp_hdr, Address(box, BasicLock::displaced_header_offset_in_bytes())); |
| |
| // If the displaced header is 0, we have a recursive unlock. |
| __ cmp(disp_hdr, zr); |
| __ br(Assembler::EQ, cont); |
| |
| |
| // Handle existing monitor. |
| if ((EmitSync & 0x02) == 0) { |
| __ ldr(tmp, Address(oop, oopDesc::mark_offset_in_bytes())); |
| __ tbnz(disp_hdr, exact_log2(markOopDesc::monitor_value), object_has_monitor); |
| } |
| |
|     // Check if it is still a lightweight lock; this is true if we |
|     // see the stack address of the basicLock in the markOop of the |
|     // object. |
| |
| if (UseLSE) { |
| __ mov(tmp, box); |
| __ casl(Assembler::xword, tmp, disp_hdr, oop); |
| __ cmp(tmp, box); |
| } else { |
| Label retry_load; |
| if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH)) |
| __ prfm(Address(oop), PSTL1STRM); |
| __ bind(retry_load); |
| __ ldxr(tmp, oop); |
| __ cmp(box, tmp); |
| __ br(Assembler::NE, cas_failed); |
| // use stlxr to ensure update is immediately visible |
| __ stlxr(tmp, disp_hdr, oop); |
| __ cbzw(tmp, cont); |
| __ b(retry_load); |
| } |
| |
| // __ cmpxchgptr(/*compare_value=*/box, |
| // /*exchange_value=*/disp_hdr, |
| // /*where=*/oop, |
| // /*result=*/tmp, |
| // cont, |
| // /*cas_failed*/NULL); |
| assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0"); |
| |
| __ bind(cas_failed); |
| |
| // Handle existing monitor. |
| if ((EmitSync & 0x02) == 0) { |
| __ b(cont); |
| |
| __ bind(object_has_monitor); |
| __ add(tmp, tmp, -markOopDesc::monitor_value); // monitor |
| __ ldr(rscratch1, Address(tmp, ObjectMonitor::owner_offset_in_bytes())); |
| __ ldr(disp_hdr, Address(tmp, ObjectMonitor::recursions_offset_in_bytes())); |
| __ eor(rscratch1, rscratch1, rthread); // Will be 0 if we are the owner. |
| __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if there are 0 recursions |
| __ cmp(rscratch1, zr); |
| __ br(Assembler::NE, cont); |
| |
| __ ldr(rscratch1, Address(tmp, ObjectMonitor::EntryList_offset_in_bytes())); |
| __ ldr(disp_hdr, Address(tmp, ObjectMonitor::cxq_offset_in_bytes())); |
| __ orr(rscratch1, rscratch1, disp_hdr); // Will be 0 if both are 0. |
| __ cmp(rscratch1, zr); |
| __ cbnz(rscratch1, cont); |
| // need a release store here |
| __ lea(tmp, Address(tmp, ObjectMonitor::owner_offset_in_bytes())); |
| __ stlr(rscratch1, tmp); // rscratch1 is zero |
| } |
| |
| __ bind(cont); |
| // flag == EQ indicates success |
| // flag == NE indicates failure |
| %} |
| |
| %} |
| |
| //----------FRAME-------------------------------------------------------------- |
| // Definition of frame structure and management information. |
| // |
| // S T A C K L A Y O U T Allocators stack-slot number |
| // | (to get allocators register number |
| // G Owned by | | v add OptoReg::stack0()) |
| // r CALLER | | |
| // o | +--------+ pad to even-align allocators stack-slot |
| // w V | pad0 | numbers; owned by CALLER |
| // t -----------+--------+----> Matcher::_in_arg_limit, unaligned |
| // h ^ | in | 5 |
| // | | args | 4 Holes in incoming args owned by SELF |
| // | | | | 3 |
| // | | +--------+ |
| // V | | old out| Empty on Intel, window on Sparc |
| // | old |preserve| Must be even aligned. |
| // | SP-+--------+----> Matcher::_old_SP, even aligned |
| // | | in | 3 area for Intel ret address |
| // Owned by |preserve| Empty on Sparc. |
| // SELF +--------+ |
| // | | pad2 | 2 pad to align old SP |
| // | +--------+ 1 |
| // | | locks | 0 |
| // | +--------+----> OptoReg::stack0(), even aligned |
| // | | pad1 | 11 pad to align new SP |
| // | +--------+ |
| // | | | 10 |
| // | | spills | 9 spills |
| // V | | 8 (pad0 slot for callee) |
| // -----------+--------+----> Matcher::_out_arg_limit, unaligned |
| // ^ | out | 7 |
| // | | args | 6 Holes in outgoing args owned by CALLEE |
| // Owned by +--------+ |
| // CALLEE | new out| 6 Empty on Intel, window on Sparc |
| // | new |preserve| Must be even-aligned. |
| // | SP-+--------+----> Matcher::_new_SP, even aligned |
| // | | | |
| // |
| // Note 1: Only region 8-11 is determined by the allocator. Region 0-5 is |
| // known from SELF's arguments and the Java calling convention. |
| // Region 6-7 is determined per call site. |
| // Note 2: If the calling convention leaves holes in the incoming argument |
| //         area, those holes are owned by SELF. Holes in the outgoing area |
| //         are owned by the CALLEE. Holes should not be necessary in the |
| //         incoming area, as the Java calling convention is completely under |
| //         the control of the AD file. Doubles can be sorted and packed to |
| //         avoid holes. Holes in the outgoing arguments may be necessary for |
| //         varargs C calling conventions. |
| // Note 3: Region 0-3 is even aligned, with pad2 as needed. Region 3-5 is |
| // even aligned with pad0 as needed. |
| // Region 6 is even aligned. Region 6-7 is NOT even aligned; |
| // (the latter is true on Intel but is it false on AArch64?) |
| // region 6-11 is even aligned; it may be padded out more so that |
| // the region from SP to FP meets the minimum stack alignment. |
| // Note 4: For I2C adapters, the incoming FP may not meet the minimum stack |
| // alignment. Region 11, pad1, may be dynamically extended so that |
| // SP meets the minimum alignment. |
| |
| frame %{ |
| // What direction does stack grow in (assumed to be same for C & Java) |
| stack_direction(TOWARDS_LOW); |
| |
| // These three registers define part of the calling convention |
| // between compiled code and the interpreter. |
| |
| // Inline Cache Register or methodOop for I2C. |
| inline_cache_reg(R12); |
| |
| // Method Oop Register when calling interpreter. |
| interpreter_method_oop_reg(R12); |
| |
| // Number of stack slots consumed by locking an object |
| sync_stack_slots(2); |
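| // (Two 32 bit slots = one 8 byte word per locked object, enough for |
| // the displaced-header word of a BasicLock.) |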
| |
| // Compiled code's Frame Pointer |
| frame_pointer(R31); |
| |
| // Interpreter stores its frame pointer in a register which is |
| // stored to the stack by I2CAdaptors. |
| // I2CAdaptors convert from interpreted java to compiled java. |
| interpreter_frame_pointer(R29); |
| |
| // Stack alignment requirement |
| stack_alignment(StackAlignmentInBytes); // Alignment size in bytes (128-bit -> 16 bytes) |
| |
| // Number of stack slots between incoming argument block and the start of |
| // a new frame. The PROLOG must add this many slots to the stack. The |
| // EPILOG must remove this many slots. aarch64 needs two words (four |
| // 32 bit slots) for the return address and fp. |
| // TODO think this is correct but check |
| in_preserve_stack_slots(4); |
| |
| // Number of outgoing stack slots killed above the out_preserve_stack_slots |
| // for calls to C. Supports the var-args backing area for register parms. |
| varargs_C_out_slots_killed(frame::arg_reg_save_area_bytes/BytesPerInt); |
| |
| // The after-PROLOG location of the return address. Location of |
| // return address specifies a type (REG or STACK) and a number |
| // representing the register number (i.e. - use a register name) or |
| // stack slot. |
| // Ret Addr is on stack in slot 0 if no locks or verification or alignment. |
| // Otherwise, it is above the locks and verification slot and alignment word |
| // TODO this may well be correct but need to check why that - 2 is there |
| // ppc port uses 0 but we definitely need to allow for fixed_slots |
| // which folds in the space used for monitors |
| return_addr(STACK - 2 + |
| align_up((Compile::current()->in_preserve_stack_slots() + |
| Compile::current()->fixed_slots()), |
| stack_alignment_in_slots())); |
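| // Worked example (illustrative): with in_preserve_stack_slots() == 4 |
| // as set above, no monitors (fixed_slots() == 0) and a 16 byte stack |
| // alignment (4 slots), this evaluates to |
| //   STACK - 2 + align_up(4 + 0, 4) = STACK + 2 |
| // i.e. the return address sits two 32 bit slots (one 8 byte word) |
| // above OptoReg::stack0(). |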
| |
| // Body of function which returns an integer array locating |
| // arguments either in registers or in stack slots. Passed an array |
| // of ideal registers called "sig" and a "length" count. Stack-slot |
| // offsets are based on outgoing arguments, i.e. a CALLER setting up |
| // arguments for a CALLEE. Incoming stack arguments are |
| // automatically biased by the preserve_stack_slots field above. |
| |
| calling_convention |
| %{ |
| // No difference between incoming and outgoing, so just pass false |
| SharedRuntime::java_calling_convention(sig_bt, regs, length, false); |
| %} |
| |
| c_calling_convention |
| %{ |
| // This is obviously always outgoing |
| (void) SharedRuntime::c_calling_convention(sig_bt, regs, NULL, length); |
| %} |
| |
| // Location of compiled Java return values. Same as C for now. |
| return_value |
| %{ |
| // TODO do we allow ideal_reg == Op_RegN??? |
| assert(ideal_reg >= Op_RegI && ideal_reg <= Op_RegL, |
| "only return normal values"); |
| |
| static const int lo[Op_RegL + 1] = { // enum name |
| 0, // Op_Node |
| 0, // Op_Set |
| R0_num, // Op_RegN |
| R0_num, // Op_RegI |
| R0_num, // Op_RegP |
| V0_num, // Op_RegF |
| V0_num, // Op_RegD |
| R0_num // Op_RegL |
| }; |
| |
| static const int hi[Op_RegL + 1] = { // enum name |
| 0, // Op_Node |
| 0, // Op_Set |
| OptoReg::Bad, // Op_RegN |
| OptoReg::Bad, // Op_RegI |
| R0_H_num, // Op_RegP |
| OptoReg::Bad, // Op_RegF |
| V0_H_num, // Op_RegD |
| R0_H_num // Op_RegL |
| }; |
| |
| return OptoRegPair(hi[ideal_reg], lo[ideal_reg]); |
| %} |
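| // Reading the tables above: an Op_RegI result is returned in R0 with |
| // no high half (OptoReg::Bad), an Op_RegL or Op_RegP result in the |
| // R0/R0_H pair, and an Op_RegD result in the V0/V0_H pair. |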
| %} |
| |
| //----------ATTRIBUTES--------------------------------------------------------- |
| //----------Operand Attributes------------------------------------------------- |
| op_attrib op_cost(1); // Required cost attribute |
| |
| //----------Instruction Attributes--------------------------------------------- |
| ins_attrib ins_cost(INSN_COST); // Required cost attribute |
| ins_attrib ins_size(32); // Required size attribute (in bits) |
| ins_attrib ins_short_branch(0); // Required flag: is this instruction |
| // a non-matching short branch variant |
| // of some long branch? |
| ins_attrib ins_alignment(4); // Required alignment attribute (must |
| // be a power of 2) specifies the |
| // alignment that some part of the |
| // instruction (not necessarily the |
| // start) requires. If > 1, a |
| // compute_padding() function must be |
| // provided for the instruction |
| |
| //----------OPERANDS----------------------------------------------------------- |
| // Operand definitions must precede instruction definitions for correct parsing |
| // in the ADLC because operands constitute user defined types which are used in |
| // instruction definitions. |
| |
| //----------Simple Operands---------------------------------------------------- |
| |
| // Integer operands 32 bit |
| // 32 bit immediate |
| operand immI() |
| %{ |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // 32 bit zero |
| operand immI0() |
| %{ |
| predicate(n->get_int() == 0); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // 32 bit unit increment |
| operand immI_1() |
| %{ |
| predicate(n->get_int() == 1); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // 32 bit unit decrement |
| operand immI_M1() |
| %{ |
| predicate(n->get_int() == -1); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Shift values for add/sub extension shift |
| operand immIExt() |
| %{ |
| predicate(0 <= n->get_int() && (n->get_int() <= 4)); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
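| // e.g. these are the shift amounts accepted by the extended-register |
| // forms of add/sub, as in ADD x0, x1, w2, sxtw #3, where the |
| // architecture limits the shift to the range 0..4. |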
| |
| operand immI_le_4() |
| %{ |
| predicate(n->get_int() <= 4); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immI_31() |
| %{ |
| predicate(n->get_int() == 31); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immI_8() |
| %{ |
| predicate(n->get_int() == 8); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immI_16() |
| %{ |
| predicate(n->get_int() == 16); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immI_24() |
| %{ |
| predicate(n->get_int() == 24); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immI_32() |
| %{ |
| predicate(n->get_int() == 32); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immI_48() |
| %{ |
| predicate(n->get_int() == 48); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immI_56() |
| %{ |
| predicate(n->get_int() == 56); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immI_63() |
| %{ |
| predicate(n->get_int() == 63); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immI_64() |
| %{ |
| predicate(n->get_int() == 64); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immI_255() |
| %{ |
| predicate(n->get_int() == 255); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immI_65535() |
| %{ |
| predicate(n->get_int() == 65535); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immL_255() |
| %{ |
| predicate(n->get_long() == 255L); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immL_65535() |
| %{ |
| predicate(n->get_long() == 65535L); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immL_4294967295() |
| %{ |
| predicate(n->get_long() == 4294967295L); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immL_bitmask() |
| %{ |
| predicate(((n->get_long() & 0xc000000000000000l) == 0) |
| && is_power_of_2(n->get_long() + 1)); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immI_bitmask() |
| %{ |
| predicate(((n->get_int() & 0xc0000000) == 0) |
| && is_power_of_2(n->get_int() + 1)); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
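| // e.g. 0xff and 0x3fffffff match immI_bitmask (value + 1 is a power |
| // of two and the top two bits are clear) whereas 0xfe does not, |
| // since 0xfe + 1 = 0xff is not a power of two. Masks of this shape |
| // are the ones a ubfx-style bitfield extract can implement directly. |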
| |
| // Scale values for scaled offset addressing modes (up to long but not quad) |
| operand immIScale() |
| %{ |
| predicate(0 <= n->get_int() && (n->get_int() <= 3)); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // 26 bit signed offset -- for pc-relative branches |
| operand immI26() |
| %{ |
| predicate(((-(1 << 25)) <= n->get_int()) && (n->get_int() < (1 << 25))); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // 19 bit signed offset -- for pc-relative loads |
| operand immI19() |
| %{ |
| predicate(((-(1 << 18)) <= n->get_int()) && (n->get_int() < (1 << 18))); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // 12 bit unsigned offset -- for base plus immediate loads |
| operand immIU12() |
| %{ |
| predicate((0 <= n->get_int()) && (n->get_int() < (1 << 12))); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immLU12() |
| %{ |
| predicate((0 <= n->get_long()) && (n->get_long() < (1 << 12))); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Offset for scaled or unscaled immediate loads and stores |
| operand immIOffset() |
| %{ |
| predicate(Address::offset_ok_for_immed(n->get_int())); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immIOffset4() |
| %{ |
| predicate(Address::offset_ok_for_immed(n->get_int(), 2)); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immIOffset8() |
| %{ |
| predicate(Address::offset_ok_for_immed(n->get_int(), 3)); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immIOffset16() |
| %{ |
| predicate(Address::offset_ok_for_immed(n->get_int(), 4)); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immLoffset() |
| %{ |
| predicate(Address::offset_ok_for_immed(n->get_long())); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immLoffset4() |
| %{ |
| predicate(Address::offset_ok_for_immed(n->get_long(), 2)); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immLoffset8() |
| %{ |
| predicate(Address::offset_ok_for_immed(n->get_long(), 3)); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immLoffset16() |
| %{ |
| predicate(Address::offset_ok_for_immed(n->get_long(), 4)); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
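| // The immIOffset/immLoffset families above track the AArch64 |
| // load/store addressing forms: roughly, an offset encodes either as |
| // a scaled unsigned 12 bit immediate (for 8 byte accesses that is |
| // 0..32760 in multiples of 8) or as an unscaled signed 9 bit |
| // immediate (-256..255) via the ldur/stur forms. The second argument |
| // to offset_ok_for_immed is the log2 of the access size. |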
| |
| // 32 bit integer valid for add sub immediate |
| operand immIAddSub() |
| %{ |
| predicate(Assembler::operand_valid_for_add_sub_immediate((long)n->get_int())); |
| match(ConI); |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // 32 bit unsigned integer valid for logical immediate |
| // TODO -- check this is right when e.g. the mask is 0x80000000 |
| operand immILog() |
| %{ |
| predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/true, (unsigned long)n->get_int())); |
| match(ConI); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
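| // e.g. 0xff (a contiguous run of ones) and 0x0f0f0f0f (a repeating |
| // bit pattern) are valid logical immediates, while 0 and 0xffffffff |
| // are not: the bitmask-immediate encoding cannot express all-zeros |
| // or all-ones. |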
| |
| // Integer operands 64 bit |
| // 64 bit immediate |
| operand immL() |
| %{ |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // 64 bit zero |
| operand immL0() |
| %{ |
| predicate(n->get_long() == 0); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // 64 bit unit increment |
| operand immL_1() |
| %{ |
| predicate(n->get_long() == 1); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // 64 bit unit decrement |
| operand immL_M1() |
| %{ |
| predicate(n->get_long() == -1); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Offset of the pc slot in the thread anchor (fits in 32 bits); |
| // matched by the thread_anchor_pc memory operand below |
| |
| operand immL_pc_off() |
| %{ |
| predicate(n->get_long() == in_bytes(JavaThread::frame_anchor_offset()) + |
| in_bytes(JavaFrameAnchor::last_Java_pc_offset())); |
| match(ConL); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // 64 bit integer valid for add sub immediate |
| operand immLAddSub() |
| %{ |
| predicate(Assembler::operand_valid_for_add_sub_immediate(n->get_long())); |
| match(ConL); |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // 64 bit integer valid for logical immediate |
| operand immLLog() |
| %{ |
| predicate(Assembler::operand_valid_for_logical_immediate(/*is32*/false, (unsigned long)n->get_long())); |
| match(ConL); |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Long Immediate: low 32-bit mask |
| operand immL_32bits() |
| %{ |
| predicate(n->get_long() == 0xFFFFFFFFL); |
| match(ConL); |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Pointer operands |
| // Pointer Immediate |
| operand immP() |
| %{ |
| match(ConP); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // NULL Pointer Immediate |
| operand immP0() |
| %{ |
| predicate(n->get_ptr() == 0); |
| match(ConP); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Pointer Immediate One |
| // this is used in object initialization (initial object header) |
| operand immP_1() |
| %{ |
| predicate(n->get_ptr() == 1); |
| match(ConP); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Polling Page Pointer Immediate |
| operand immPollPage() |
| %{ |
| predicate((address)n->get_ptr() == os::get_polling_page()); |
| match(ConP); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Card Table Byte Map Base |
| operand immByteMapBase() |
| %{ |
| // Get base of card map |
| predicate((jbyte*)n->get_ptr() == |
| ((CardTableModRefBS*)(Universe::heap()->barrier_set()))->byte_map_base); |
| match(ConP); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Pointer Immediate Minus One |
| // this is used when we want to write the current PC to the thread anchor |
| operand immP_M1() |
| %{ |
| predicate(n->get_ptr() == -1); |
| match(ConP); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Pointer Immediate Minus Two |
| // this is used when we want to write the current PC to the thread anchor |
| operand immP_M2() |
| %{ |
| predicate(n->get_ptr() == -2); |
| match(ConP); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Float and Double operands |
| // Double Immediate |
| operand immD() |
| %{ |
| match(ConD); |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Double Immediate: +0.0d |
| operand immD0() |
| %{ |
| predicate(jlong_cast(n->getd()) == 0); |
| match(ConD); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Double Immediate: encodable as an 8 bit packed fmov immediate. |
| operand immDPacked() |
| %{ |
| predicate(Assembler::operand_valid_for_float_immediate(n->getd())); |
| match(ConD); |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Float Immediate |
| operand immF() |
| %{ |
| match(ConF); |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Float Immediate: +0.0f. |
| operand immF0() |
| %{ |
| predicate(jint_cast(n->getf()) == 0); |
| match(ConF); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Float Immediate: encodable as an 8 bit packed fmov immediate. |
| operand immFPacked() |
| %{ |
| predicate(Assembler::operand_valid_for_float_immediate((double)n->getf())); |
| match(ConF); |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Narrow pointer operands |
| // Narrow Pointer Immediate |
| operand immN() |
| %{ |
| match(ConN); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Narrow NULL Pointer Immediate |
| operand immN0() |
| %{ |
| predicate(n->get_narrowcon() == 0); |
| match(ConN); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| operand immNKlass() |
| %{ |
| match(ConNKlass); |
| |
| op_cost(0); |
| format %{ %} |
| interface(CONST_INTER); |
| %} |
| |
| // Integer 32 bit Register Operands |
| // Integer 32 bit Register (excludes SP) |
| operand iRegI() |
| %{ |
| constraint(ALLOC_IN_RC(any_reg32)); |
| match(RegI); |
| match(iRegINoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Integer 32 bit Register not Special |
| operand iRegINoSp() |
| %{ |
| constraint(ALLOC_IN_RC(no_special_reg32)); |
| match(RegI); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Integer 64 bit Register Operands |
| // Integer 64 bit Register (includes SP) |
| operand iRegL() |
| %{ |
| constraint(ALLOC_IN_RC(any_reg)); |
| match(RegL); |
| match(iRegLNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Integer 64 bit Register not Special |
| operand iRegLNoSp() |
| %{ |
| constraint(ALLOC_IN_RC(no_special_reg)); |
| match(RegL); |
| match(iRegL_R0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Pointer Register Operands |
| // Pointer Register |
| operand iRegP() |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(RegP); |
| match(iRegPNoSp); |
| match(iRegP_R0); |
| //match(iRegP_R2); |
| //match(iRegP_R4); |
| //match(iRegP_R5); |
| match(thread_RegP); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Pointer 64 bit Register not Special |
| operand iRegPNoSp() |
| %{ |
| constraint(ALLOC_IN_RC(no_special_ptr_reg)); |
| match(RegP); |
| // match(iRegP); |
| // match(iRegP_R0); |
| // match(iRegP_R2); |
| // match(iRegP_R4); |
| // match(iRegP_R5); |
| // match(thread_RegP); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Pointer 64 bit Register R0 only |
| operand iRegP_R0() |
| %{ |
| constraint(ALLOC_IN_RC(r0_reg)); |
| match(RegP); |
| // match(iRegP); |
| match(iRegPNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Pointer 64 bit Register R1 only |
| operand iRegP_R1() |
| %{ |
| constraint(ALLOC_IN_RC(r1_reg)); |
| match(RegP); |
| // match(iRegP); |
| match(iRegPNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Pointer 64 bit Register R2 only |
| operand iRegP_R2() |
| %{ |
| constraint(ALLOC_IN_RC(r2_reg)); |
| match(RegP); |
| // match(iRegP); |
| match(iRegPNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Pointer 64 bit Register R3 only |
| operand iRegP_R3() |
| %{ |
| constraint(ALLOC_IN_RC(r3_reg)); |
| match(RegP); |
| // match(iRegP); |
| match(iRegPNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Pointer 64 bit Register R4 only |
| operand iRegP_R4() |
| %{ |
| constraint(ALLOC_IN_RC(r4_reg)); |
| match(RegP); |
| // match(iRegP); |
| match(iRegPNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Pointer 64 bit Register R5 only |
| operand iRegP_R5() |
| %{ |
| constraint(ALLOC_IN_RC(r5_reg)); |
| match(RegP); |
| // match(iRegP); |
| match(iRegPNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Pointer 64 bit Register R10 only |
| operand iRegP_R10() |
| %{ |
| constraint(ALLOC_IN_RC(r10_reg)); |
| match(RegP); |
| // match(iRegP); |
| match(iRegPNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Long 64 bit Register R0 only |
| operand iRegL_R0() |
| %{ |
| constraint(ALLOC_IN_RC(r0_reg)); |
| match(RegL); |
| match(iRegLNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Long 64 bit Register R2 only |
| operand iRegL_R2() |
| %{ |
| constraint(ALLOC_IN_RC(r2_reg)); |
| match(RegL); |
| match(iRegLNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Long 64 bit Register R3 only |
| operand iRegL_R3() |
| %{ |
| constraint(ALLOC_IN_RC(r3_reg)); |
| match(RegL); |
| match(iRegLNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Long 64 bit Register R11 only |
| operand iRegL_R11() |
| %{ |
| constraint(ALLOC_IN_RC(r11_reg)); |
| match(RegL); |
| match(iRegLNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Pointer 64 bit Register FP only |
| operand iRegP_FP() |
| %{ |
| constraint(ALLOC_IN_RC(fp_reg)); |
| match(RegP); |
| // match(iRegP); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Register R0 only |
| operand iRegI_R0() |
| %{ |
| constraint(ALLOC_IN_RC(int_r0_reg)); |
| match(RegI); |
| match(iRegINoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Register R2 only |
| operand iRegI_R2() |
| %{ |
| constraint(ALLOC_IN_RC(int_r2_reg)); |
| match(RegI); |
| match(iRegINoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Register R3 only |
| operand iRegI_R3() |
| %{ |
| constraint(ALLOC_IN_RC(int_r3_reg)); |
| match(RegI); |
| match(iRegINoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| |
| // Register R4 only |
| operand iRegI_R4() |
| %{ |
| constraint(ALLOC_IN_RC(int_r4_reg)); |
| match(RegI); |
| match(iRegINoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| |
| // Narrow Pointer Register Operands |
| // Narrow Pointer Register |
| operand iRegN() |
| %{ |
| constraint(ALLOC_IN_RC(any_reg32)); |
| match(RegN); |
| match(iRegNNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| operand iRegN_R0() |
| %{ |
| constraint(ALLOC_IN_RC(r0_reg)); |
| match(iRegN); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| operand iRegN_R2() |
| %{ |
| constraint(ALLOC_IN_RC(r2_reg)); |
| match(iRegN); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| operand iRegN_R3() |
| %{ |
| constraint(ALLOC_IN_RC(r3_reg)); |
| match(iRegN); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Narrow Pointer Register not Special |
| operand iRegNNoSp() |
| %{ |
| constraint(ALLOC_IN_RC(no_special_reg32)); |
| match(RegN); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // heap base register -- used for encoding immN0 |
| |
| operand iRegIHeapbase() |
| %{ |
| constraint(ALLOC_IN_RC(heapbase_reg)); |
| match(RegI); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Float Register |
| // Float register operands |
| operand vRegF() |
| %{ |
| constraint(ALLOC_IN_RC(float_reg)); |
| match(RegF); |
| |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Double Register |
| // Double register operands |
| operand vRegD() |
| %{ |
| constraint(ALLOC_IN_RC(double_reg)); |
| match(RegD); |
| |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| operand vecD() |
| %{ |
| constraint(ALLOC_IN_RC(vectord_reg)); |
| match(VecD); |
| |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| operand vecX() |
| %{ |
| constraint(ALLOC_IN_RC(vectorx_reg)); |
| match(VecX); |
| |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| operand vRegD_V0() |
| %{ |
| constraint(ALLOC_IN_RC(v0_reg)); |
| match(RegD); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| operand vRegD_V1() |
| %{ |
| constraint(ALLOC_IN_RC(v1_reg)); |
| match(RegD); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| operand vRegD_V2() |
| %{ |
| constraint(ALLOC_IN_RC(v2_reg)); |
| match(RegD); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| operand vRegD_V3() |
| %{ |
| constraint(ALLOC_IN_RC(v3_reg)); |
| match(RegD); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Flags register, used as output of signed compare instructions |
| |
| // note that on AArch64 we also use this register as the output of |
| // floating point compare instructions (CmpF CmpD). this ensures |
| // that ordered inequality tests use GT, GE, LT or LE none of which |
| // pass through cases where the result is unordered i.e. one or both |
| // inputs to the compare is a NaN. this means that the ideal code can |
| // replace e.g. a GT with an LE and not end up capturing the NaN case |
| // (where the comparison should always fail). EQ and NE tests are |
| // always generated in ideal code so that unordered folds into the NE |
| // case, matching the behaviour of AArch64 NE. |
| // |
| // This differs from x86 where the outputs of FP compares use a |
| // special FP flags registers and where compares based on this |
| // register are distinguished into ordered inequalities (cmpOpUCF) and |
| // EQ/NEQ tests (cmpOpUCF2). x86 has to special case the latter tests |
| // to explicitly handle the unordered case in branches. x86 also has |
| // to include extra CMoveX rules to accept a cmpOpUCF input. |
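| // |
| // Concretely: an FCMP whose inputs are unordered (a NaN operand) |
| // sets NZCV = 0011, so the GE and GT tests both fail while NE |
| // succeeds and EQ fails -- unordered folds into the "not equal" case. |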
| |
| operand rFlagsReg() |
| %{ |
| constraint(ALLOC_IN_RC(int_flags)); |
| match(RegFlags); |
| |
| op_cost(0); |
| format %{ "RFLAGS" %} |
| interface(REG_INTER); |
| %} |
| |
| // Flags register, used as output of unsigned compare instructions |
| operand rFlagsRegU() |
| %{ |
| constraint(ALLOC_IN_RC(int_flags)); |
| match(RegFlags); |
| |
| op_cost(0); |
| format %{ "RFLAGSU" %} |
| interface(REG_INTER); |
| %} |
| |
| // Special Registers |
| |
| // Method Register |
| operand inline_cache_RegP(iRegP reg) |
| %{ |
| constraint(ALLOC_IN_RC(method_reg)); // inline_cache_reg |
| match(reg); |
| match(iRegPNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| operand interpreter_method_oop_RegP(iRegP reg) |
| %{ |
| constraint(ALLOC_IN_RC(method_reg)); // interpreter_method_oop_reg |
| match(reg); |
| match(iRegPNoSp); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| // Thread Register |
| operand thread_RegP(iRegP reg) |
| %{ |
| constraint(ALLOC_IN_RC(thread_reg)); // link_reg |
| match(reg); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| operand lr_RegP(iRegP reg) |
| %{ |
| constraint(ALLOC_IN_RC(lr_reg)); // link_reg |
| match(reg); |
| op_cost(0); |
| format %{ %} |
| interface(REG_INTER); |
| %} |
| |
| //----------Memory Operands---------------------------------------------------- |
| |
| operand indirect(iRegP reg) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(reg); |
| op_cost(0); |
| format %{ "[$reg]" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp(0x0); |
| %} |
| %} |
| |
| operand indIndexScaledI2L(iRegP reg, iRegI ireg, immIScale scale) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int())); |
| match(AddP reg (LShiftL (ConvI2L ireg) scale)); |
| op_cost(0); |
| format %{ "$reg, $ireg sxtw($scale), 0, I2L" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index($ireg); |
| scale($scale); |
| disp(0x0); |
| %} |
| %} |
| |
| operand indIndexScaled(iRegP reg, iRegL lreg, immIScale scale) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| predicate(size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int())); |
| match(AddP reg (LShiftL lreg scale)); |
| op_cost(0); |
| format %{ "$reg, $lreg lsl($scale)" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index($lreg); |
| scale($scale); |
| disp(0x0); |
| %} |
| %} |
| |
| operand indIndexI2L(iRegP reg, iRegI ireg) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP reg (ConvI2L ireg)); |
| op_cost(0); |
| format %{ "$reg, $ireg, 0, I2L" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index($ireg); |
| scale(0x0); |
| disp(0x0); |
| %} |
| %} |
| |
| operand indIndex(iRegP reg, iRegL lreg) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP reg lreg); |
| op_cost(0); |
| format %{ "$reg, $lreg" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index($lreg); |
| scale(0x0); |
| disp(0x0); |
| %} |
| %} |
| |
| operand indOffI(iRegP reg, immIOffset off) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP reg off); |
| op_cost(0); |
| format %{ "[$reg, $off]" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp($off); |
| %} |
| %} |
| |
| operand indOffI4(iRegP reg, immIOffset4 off) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP reg off); |
| op_cost(0); |
| format %{ "[$reg, $off]" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp($off); |
| %} |
| %} |
| |
| operand indOffI8(iRegP reg, immIOffset8 off) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP reg off); |
| op_cost(0); |
| format %{ "[$reg, $off]" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp($off); |
| %} |
| %} |
| |
| operand indOffI16(iRegP reg, immIOffset16 off) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP reg off); |
| op_cost(0); |
| format %{ "[$reg, $off]" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp($off); |
| %} |
| %} |
| |
| operand indOffL(iRegP reg, immLoffset off) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP reg off); |
| op_cost(0); |
| format %{ "[$reg, $off]" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp($off); |
| %} |
| %} |
| |
| operand indOffL4(iRegP reg, immLoffset4 off) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP reg off); |
| op_cost(0); |
| format %{ "[$reg, $off]" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp($off); |
| %} |
| %} |
| |
| operand indOffL8(iRegP reg, immLoffset8 off) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP reg off); |
| op_cost(0); |
| format %{ "[$reg, $off]" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp($off); |
| %} |
| %} |
| |
| operand indOffL16(iRegP reg, immLoffset16 off) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP reg off); |
| op_cost(0); |
| format %{ "[$reg, $off]" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp($off); |
| %} |
| %} |
| |
| operand indirectN(iRegN reg) |
| %{ |
| predicate(Universe::narrow_oop_shift() == 0); |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(DecodeN reg); |
| op_cost(0); |
| format %{ "[$reg]\t# narrow" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp(0x0); |
| %} |
| %} |
| |
| operand indIndexScaledI2LN(iRegN reg, iRegI ireg, immIScale scale) |
| %{ |
| predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int())); |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP (DecodeN reg) (LShiftL (ConvI2L ireg) scale)); |
| op_cost(0); |
| format %{ "$reg, $ireg sxtw($scale), 0, I2L\t# narrow" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index($ireg); |
| scale($scale); |
| disp(0x0); |
| %} |
| %} |
| |
| operand indIndexScaledN(iRegN reg, iRegL lreg, immIScale scale) |
| %{ |
| predicate(Universe::narrow_oop_shift() == 0 && size_fits_all_mem_uses(n->as_AddP(), n->in(AddPNode::Offset)->in(2)->get_int())); |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP (DecodeN reg) (LShiftL lreg scale)); |
| op_cost(0); |
| format %{ "$reg, $lreg lsl($scale)\t# narrow" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index($lreg); |
| scale($scale); |
| disp(0x0); |
| %} |
| %} |
| |
| operand indIndexI2LN(iRegN reg, iRegI ireg) |
| %{ |
| predicate(Universe::narrow_oop_shift() == 0); |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP (DecodeN reg) (ConvI2L ireg)); |
| op_cost(0); |
| format %{ "$reg, $ireg, 0, I2L\t# narrow" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index($ireg); |
| scale(0x0); |
| disp(0x0); |
| %} |
| %} |
| |
| operand indIndexN(iRegN reg, iRegL lreg) |
| %{ |
| predicate(Universe::narrow_oop_shift() == 0); |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP (DecodeN reg) lreg); |
| op_cost(0); |
| format %{ "$reg, $lreg\t# narrow" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index($lreg); |
| scale(0x0); |
| disp(0x0); |
| %} |
| %} |
| |
| operand indOffIN(iRegN reg, immIOffset off) |
| %{ |
| predicate(Universe::narrow_oop_shift() == 0); |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP (DecodeN reg) off); |
| op_cost(0); |
| format %{ "[$reg, $off]\t# narrow" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp($off); |
| %} |
| %} |
| |
| operand indOffLN(iRegN reg, immLoffset off) |
| %{ |
| predicate(Universe::narrow_oop_shift() == 0); |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP (DecodeN reg) off); |
| op_cost(0); |
| format %{ "[$reg, $off]\t# narrow" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp($off); |
| %} |
| %} |
| |
| |
| |
| // AArch64 opto stubs need to write to the pc slot in the thread anchor |
| operand thread_anchor_pc(thread_RegP reg, immL_pc_off off) |
| %{ |
| constraint(ALLOC_IN_RC(ptr_reg)); |
| match(AddP reg off); |
| op_cost(0); |
| format %{ "[$reg, $off]" %} |
| interface(MEMORY_INTER) %{ |
| base($reg); |
| index(0xffffffff); |
| scale(0x0); |
| disp($off); |
| %} |
| %} |
| |
| //----------Special Memory Operands-------------------------------------------- |
| // Stack Slot Operand - This operand is used for loading and storing temporary |
| // values on the stack where a match requires a value to |
| // flow through memory. |
| operand stackSlotP(sRegP reg) |
| %{ |
| constraint(ALLOC_IN_RC(stack_slots)); |
| op_cost(100); |
| // No match rule because this operand is only generated in matching |
| // match(RegP); |
| format %{ "[$reg]" %} |
| interface(MEMORY_INTER) %{ |
| base(0x1e); // RSP |
| index(0x0); // No Index |
| scale(0x0); // No Scale |
| disp($reg); // Stack Offset |
| %} |
| %} |
| |
| operand stackSlotI(sRegI reg) |
| %{ |
| constraint(ALLOC_IN_RC(stack_slots)); |
| // No match rule because this operand is only generated in matching |
| // match(RegI); |
| format %{ "[$reg]" %} |
| interface(MEMORY_INTER) %{ |
| base(0x1e); // RSP |
| index(0x0); // No Index |
| scale(0x0); // No Scale |
| disp($reg); // Stack Offset |
| %} |
| %} |
| |
| operand stackSlotF(sRegF reg) |
| %{ |
| constraint(ALLOC_IN_RC(stack_slots)); |
| // No match rule because this operand is only generated in matching |
| // match(RegF); |
| format %{ "[$reg]" %} |
| interface(MEMORY_INTER) %{ |
| base(0x1e); // RSP |
| index(0x0); // No Index |
| scale(0x0); // No Scale |
| disp($reg); // Stack Offset |
| %} |
| %} |
| |
| operand stackSlotD(sRegD reg) |
| %{ |
| constraint(ALLOC_IN_RC(stack_slots)); |
| // No match rule because this operand is only generated in matching |
| // match(RegD); |
| format %{ "[$reg]" %} |
| interface(MEMORY_INTER) %{ |
| base(0x1e); // RSP |
| index(0x0); // No Index |
| scale(0x0); // No Scale |
| disp($reg); // Stack Offset |
| %} |
| %} |
| |
| operand stackSlotL(sRegL reg) |
| %{ |
| constraint(ALLOC_IN_RC(stack_slots)); |
| // No match rule because this operand is only generated in matching |
| // match(RegL); |
| format %{ "[$reg]" %} |
| interface(MEMORY_INTER) %{ |
| base(0x1e); // RSP |
| index(0x0); // No Index |
| scale(0x0); // No Scale |
| disp($reg); // Stack Offset |
| %} |
| %} |
| |
| // Operands for expressing Control Flow |
| // NOTE: Label is a predefined operand which should not be redefined in |
| // the AD file. It is generically handled within the ADLC. |
| |
| //----------Conditional Branch Operands---------------------------------------- |
| // Comparison Op - This is the operation of the comparison, and is limited to |
| // the following set of codes: |
| // L (<), LE (<=), G (>), GE (>=), E (==), NE (!=) |
| // |
| // Other attributes of the comparison, such as unsignedness, are specified |
| // by the comparison instruction that sets a condition code flags register. |
| // That result is represented by a flags operand whose subtype is appropriate |
| // to the unsignedness (etc.) of the comparison. |
| // |
| // Later, the instruction which matches both the Comparison Op (a Bool) and |
| // the flags (produced by the Cmp) specifies the coding of the comparison op |
| // by matching a specific subtype of Bool operand below, such as cmpOpU. |
| |
| // used for signed integral comparisons and fp comparisons |
| |
| operand cmpOp() |
| %{ |
| match(Bool); |
| |
| format %{ "" %} |
| interface(COND_INTER) %{ |
| equal(0x0, "eq"); |
| not_equal(0x1, "ne"); |
| less(0xb, "lt"); |
| greater_equal(0xa, "ge"); |
| less_equal(0xd, "le"); |
| greater(0xc, "gt"); |
| overflow(0x6, "vs"); |
| no_overflow(0x7, "vc"); |
| %} |
| %} |
| |
| // used for unsigned integral comparisons |
| |
| operand cmpOpU() |
| %{ |
| match(Bool); |
| |
| format %{ "" %} |
| interface(COND_INTER) %{ |
| equal(0x0, "eq"); |
| not_equal(0x1, "ne"); |
| less(0x3, "lo"); |
| greater_equal(0x2, "hs"); |
| less_equal(0x9, "ls"); |
| greater(0x8, "hi"); |
| overflow(0x6, "vs"); |
| no_overflow(0x7, "vc"); |
| %} |
| %} |
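| // e.g. an unsigned (CmpU a b) whose Bool tests BoolTest::lt matches |
| // cmpOpU and encodes condition 0x3, so the branch or conditional |
| // select uses "lo" (unsigned lower) rather than the signed "lt". |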
| |
| // used for certain integral comparisons which can be |
| // converted to cbxx or tbxx instructions |
| |
| operand cmpOpEqNe() |
| %{ |
| match(Bool); |
| match(CmpOp); |
| op_cost(0); |
| predicate(n->as_Bool()->_test._test == BoolTest::ne |
| || n->as_Bool()->_test._test == BoolTest::eq); |
| |
| format %{ "" %} |
| interface(COND_INTER) %{ |
| equal(0x0, "eq"); |
| not_equal(0x1, "ne"); |
| less(0xb, "lt"); |
| greater_equal(0xa, "ge"); |
| less_equal(0xd, "le"); |
| greater(0xc, "gt"); |
| overflow(0x6, "vs"); |
| no_overflow(0x7, "vc"); |
| %} |
| %} |
| |
| // used for certain integral comparisons which can be |
| // converted to cbxx or tbxx instructions |
| |
| operand cmpOpLtGe() |
| %{ |
| match(Bool); |
| match(CmpOp); |
| op_cost(0); |
| |
| predicate(n->as_Bool()->_test._test == BoolTest::lt |
| || n->as_Bool()->_test._test == BoolTest::ge); |
| |
| format %{ "" %} |
| interface(COND_INTER) %{ |
| equal(0x0, "eq"); |
| not_equal(0x1, "ne"); |
| less(0xb, "lt"); |
| greater_equal(0xa, "ge"); |
| less_equal(0xd, "le"); |
| greater(0xc, "gt"); |
| overflow(0x6, "vs"); |
| no_overflow(0x7, "vc"); |
| %} |
| %} |
| |
| // used for certain unsigned integral comparisons which can be |
| // converted to cbxx or tbxx instructions |
| |
| operand cmpOpUEqNeLtGe() |
| %{ |
| match(Bool); |
| match(CmpOp); |
| op_cost(0); |
| |
| predicate(n->as_Bool()->_test._test == BoolTest::eq |
| || n->as_Bool()->_test._test == BoolTest::ne |
| || n->as_Bool()->_test._test == BoolTest::lt |
| || n->as_Bool()->_test._test == BoolTest::ge); |
| |
| format %{ "" %} |
| interface(COND_INTER) %{ |
| equal(0x0, "eq"); |
| not_equal(0x1, "ne"); |
| less(0xb, "lt"); |
| greater_equal(0xa, "ge"); |
| less_equal(0xd, "le"); |
| greater(0xc, "gt"); |
| overflow(0x6, "vs"); |
| no_overflow(0x7, "vc"); |
| %} |
| %} |
| |
| // Special operand allowing long args to int ops to be truncated for free |
| |
| operand iRegL2I(iRegL reg) %{ |
| |
| op_cost(0); |
| |
| match(ConvL2I reg); |
| |
| format %{ "l2i($reg)" %} |
| |
| interface(REG_INTER); |
| %} |
| |
| opclass vmem4(indirect, indIndex, indOffI4, indOffL4); |
| opclass vmem8(indirect, indIndex, indOffI8, indOffL8); |
| opclass vmem16(indirect, indIndex, indOffI16, indOffL16); |
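| // vmem4/vmem8/vmem16 group the addressing forms legal for 4, 8 and |
| // 16 byte vector loads and stores: a plain base register, a |
| // register-indexed address, or a base plus an immediate offset that |
| // encodes for that access size (indOffI4/indOffL4 etc. above). |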
| |
| //----------OPERAND CLASSES---------------------------------------------------- |
| // Operand Classes are groups of operands that are used to simplify |
| // instruction definitions by not requiring the AD writer to specify |
| // separate instructions for every form of operand when the |
| // instruction accepts multiple operand types with the same basic |
| // encoding and format. The classic case of this is memory operands. |
| |
| // memory is used to define read/write location for load/store |
| // instruction defs. we can turn a memory op into an Address |
| |
| opclass memory(indirect, indIndexScaled, indIndexScaledI2L, indIndexI2L, indIndex, indOffI, indOffL, |
| indirectN, indIndexScaledN, indIndexScaledI2LN, indIndexI2LN, indIndexN, indOffIN, indOffLN); |
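| // As an illustration (a sketch of the shape only), a load rule |
| // defined later in the file uses this opclass as |
| // |
| //   instruct loadI(iRegINoSp dst, memory mem) |
| //   %{ |
| //     match(Set dst (LoadI mem)); |
| //     ... |
| //   %} |
| // |
| // so a single rule covers every addressing mode listed above. |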
| |
| // iRegIorL2I is used for src inputs in rules for 32 bit int (I) |
| // operations. it allows the src to be either an iRegI or a (ConvL2I |
| // iRegL). in the latter case the l2i normally planted for a ConvL2I |
| // can be elided because the 32-bit instruction will just employ the |
| // lower 32 bits anyway. |
| // |
| // n.b. this does not elide all L2I conversions. if the truncated |
| // value is consumed by more than one operation then the ConvL2I |
| // cannot be bundled into the consuming nodes so an l2i gets planted |
| // (actually a movw $dst $src) and the downstream instructions consume |
| // the result of the l2i as an iRegI input. That's a shame since the |
| // movw is actually redundant but it's not too costly. |
| |
| opclass iRegIorL2I(iRegI, iRegL2I); |
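| // e.g. (AddI src1 (ConvL2I src2)) can match a single addw that reads |
| // the low 32 bits of src2's long register directly, with no separate |
| // l2i/movw needed. |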
| |
| //----------PIPELINE----------------------------------------------------------- |
| // Rules which define the behavior of the target architecture's pipeline. |
| |
| // For specific pipelines, e.g. A53, define the stages of that pipeline |
| //pipe_desc(ISS, EX1, EX2, WR); |
| #define ISS S0 |
| #define EX1 S1 |
| #define EX2 S2 |
| #define WR S3 |
| |
| // Integer ALU reg operation |
| pipeline %{ |
| |
| attributes %{ |
| // ARM instructions are of fixed length |
| fixed_size_instructions; // Fixed size instructions |
| max_instructions_per_bundle = 2; // A53 = 2, A57 = 4 |
| // ARM instructions come in 32-bit word units |
| instruction_unit_size = 4; // An instruction is 4 bytes long |
| instruction_fetch_unit_size = 64; // The processor fetches one line |
| instruction_fetch_units = 1; // of 64 bytes |
| |
| // List of nop instructions |
| nops( MachNop ); |
| %} |
| |
| // We don't use an actual pipeline model, so we don't care about |
| // resources or their description. We do use pipeline classes to |
| // introduce fixed latencies. |
| |
| //----------RESOURCES---------------------------------------------------------- |
| // Resources are the functional units available to the machine |
| |
| resources( INS0, INS1, INS01 = INS0 | INS1, |
| ALU0, ALU1, ALU = ALU0 | ALU1, |
| MAC, |
| DIV, |
| BRANCH, |
| LDST, |
| NEON_FP); |
| |
| //----------PIPELINE DESCRIPTION----------------------------------------------- |
| // Pipeline Description specifies the stages in the machine's pipeline |
| |
| // Define the pipeline as a generic 6 stage pipeline |
| pipe_desc(S0, S1, S2, S3, S4, S5); |
| |
| //----------PIPELINE CLASSES--------------------------------------------------- |
| // Pipeline Classes describe the stages in which input and output are |
| // referenced by the hardware pipeline. |
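| // Instruction definitions select one of these classes through their |
| // ins_pipe attribute, e.g. ins_pipe(ialu_reg_reg) on a simple add; |
| // the class supplies the operand read/write stages and the resource |
| // usage that the scheduler sees. |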
| |
| pipe_class fp_dop_reg_reg_s(vRegF dst, vRegF src1, vRegF src2) |
| %{ |
| single_instruction; |
| src1 : S1(read); |
| src2 : S2(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_dop_reg_reg_d(vRegD dst, vRegD src1, vRegD src2) |
| %{ |
| single_instruction; |
| src1 : S1(read); |
| src2 : S2(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_uop_s(vRegF dst, vRegF src) |
| %{ |
| single_instruction; |
| src : S1(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_uop_d(vRegD dst, vRegD src) |
| %{ |
| single_instruction; |
| src : S1(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_d2f(vRegF dst, vRegD src) |
| %{ |
| single_instruction; |
| src : S1(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_f2d(vRegD dst, vRegF src) |
| %{ |
| single_instruction; |
| src : S1(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_f2i(iRegINoSp dst, vRegF src) |
| %{ |
| single_instruction; |
| src : S1(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_f2l(iRegLNoSp dst, vRegF src) |
| %{ |
| single_instruction; |
| src : S1(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_i2f(vRegF dst, iRegIorL2I src) |
| %{ |
| single_instruction; |
| src : S1(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_l2f(vRegF dst, iRegL src) |
| %{ |
| single_instruction; |
| src : S1(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_d2i(iRegINoSp dst, vRegD src) |
| %{ |
| single_instruction; |
| src : S1(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_d2l(iRegLNoSp dst, vRegD src) |
| %{ |
| single_instruction; |
| src : S1(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_i2d(vRegD dst, iRegIorL2I src) |
| %{ |
| single_instruction; |
| src : S1(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_l2d(vRegD dst, iRegIorL2I src) |
| %{ |
| single_instruction; |
| src : S1(read); |
| dst : S5(write); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_div_s(vRegF dst, vRegF src1, vRegF src2) |
| %{ |
| single_instruction; |
| src1 : S1(read); |
| src2 : S2(read); |
| dst : S5(write); |
| INS0 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_div_d(vRegD dst, vRegD src1, vRegD src2) |
| %{ |
| single_instruction; |
| src1 : S1(read); |
| src2 : S2(read); |
| dst : S5(write); |
| INS0 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class fp_cond_reg_reg_s(vRegF dst, vRegF src1, vRegF src2, rFlagsReg cr) |
| %{ |
| single_instruction; |
| cr : S1(read); |
| src1 : S1(read); |
| src2 : S1(read); |
| dst : S3(write); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class fp_cond_reg_reg_d(vRegD dst, vRegD src1, vRegD src2, rFlagsReg cr) |
| %{ |
| single_instruction; |
| cr : S1(read); |
| src1 : S1(read); |
| src2 : S1(read); |
| dst : S3(write); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class fp_imm_s(vRegF dst) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class fp_imm_d(vRegD dst) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class fp_load_constant_s(vRegF dst) |
| %{ |
| single_instruction; |
| dst : S4(write); |
| INS01 : ISS; |
| NEON_FP : S4; |
| %} |
| |
| pipe_class fp_load_constant_d(vRegD dst) |
| %{ |
| single_instruction; |
| dst : S4(write); |
| INS01 : ISS; |
| NEON_FP : S4; |
| %} |
| |
| pipe_class vmul64(vecD dst, vecD src1, vecD src2) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| src1 : S1(read); |
| src2 : S1(read); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class vmul128(vecX dst, vecX src1, vecX src2) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| src1 : S1(read); |
| src2 : S1(read); |
| INS0 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class vmla64(vecD dst, vecD src1, vecD src2) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| src1 : S1(read); |
| src2 : S1(read); |
| dst : S1(read); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class vmla128(vecX dst, vecX src1, vecX src2) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| src1 : S1(read); |
| src2 : S1(read); |
| dst : S1(read); |
| INS0 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class vdop64(vecD dst, vecD src1, vecD src2) |
| %{ |
| single_instruction; |
| dst : S4(write); |
| src1 : S2(read); |
| src2 : S2(read); |
| INS01 : ISS; |
| NEON_FP : S4; |
| %} |
| |
| pipe_class vdop128(vecX dst, vecX src1, vecX src2) |
| %{ |
| single_instruction; |
| dst : S4(write); |
| src1 : S2(read); |
| src2 : S2(read); |
| INS0 : ISS; |
| NEON_FP : S4; |
| %} |
| |
| pipe_class vlogical64(vecD dst, vecD src1, vecD src2) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| src1 : S2(read); |
| src2 : S2(read); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vlogical128(vecX dst, vecX src1, vecX src2) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| src1 : S2(read); |
| src2 : S2(read); |
| INS0 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vshift64(vecD dst, vecD src, vecX shift) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| src : S1(read); |
| shift : S1(read); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vshift128(vecX dst, vecX src, vecX shift) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| src : S1(read); |
| shift : S1(read); |
| INS0 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vshift64_imm(vecD dst, vecD src, immI shift) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| src : S1(read); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vshift128_imm(vecX dst, vecX src, immI shift) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| src : S1(read); |
| INS0 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vdop_fp64(vecD dst, vecD src1, vecD src2) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| src1 : S1(read); |
| src2 : S1(read); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class vdop_fp128(vecX dst, vecX src1, vecX src2) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| src1 : S1(read); |
| src2 : S1(read); |
| INS0 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class vmuldiv_fp64(vecD dst, vecD src1, vecD src2) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| src1 : S1(read); |
| src2 : S1(read); |
| INS0 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class vmuldiv_fp128(vecX dst, vecX src1, vecX src2) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| src1 : S1(read); |
| src2 : S1(read); |
| INS0 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class vsqrt_fp128(vecX dst, vecX src) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| src : S1(read); |
| INS0 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class vunop_fp64(vecD dst, vecD src) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| src : S1(read); |
| INS01 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class vunop_fp128(vecX dst, vecX src) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| src : S1(read); |
| INS0 : ISS; |
| NEON_FP : S5; |
| %} |
| |
| pipe_class vdup_reg_reg64(vecD dst, iRegI src) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| src : S1(read); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vdup_reg_reg128(vecX dst, iRegI src) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| src : S1(read); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vdup_reg_freg64(vecD dst, vRegF src) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| src : S1(read); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vdup_reg_freg128(vecX dst, vRegF src) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| src : S1(read); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vdup_reg_dreg128(vecX dst, vRegD src) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| src : S1(read); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vmovi_reg_imm64(vecD dst) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vmovi_reg_imm128(vecX dst) |
| %{ |
| single_instruction; |
| dst : S3(write); |
| INS0 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vload_reg_mem64(vecD dst, vmem8 mem) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| mem : ISS(read); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vload_reg_mem128(vecX dst, vmem16 mem) |
| %{ |
| single_instruction; |
| dst : S5(write); |
| mem : ISS(read); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vstore_reg_mem64(vecD src, vmem8 mem) |
| %{ |
| single_instruction; |
| mem : ISS(read); |
| src : S2(read); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| pipe_class vstore_reg_mem128(vecX src, vmem16 mem) |
| %{ |
| single_instruction; |
| mem : ISS(read); |
| src : S2(read); |
| INS01 : ISS; |
| NEON_FP : S3; |
| %} |
| |
| //------- Integer ALU operations -------------------------- |
| |
| // Integer ALU reg-reg operation |
| // Operands needed in EX1, result generated in EX2 |
| // Eg. ADD x0, x1, x2 |
| pipe_class ialu_reg_reg(iRegI dst, iRegI src1, iRegI src2) |
| %{ |
| single_instruction; |
| dst : EX2(write); |
| src1 : EX1(read); |
| src2 : EX1(read); |
| INS01 : ISS; // Dual issue as instruction 0 or 1 |
| ALU : EX2; |
| %} |
| |
| // Integer ALU reg-reg operation with constant shift |
| // Shifted register must be available in LATE_ISS instead of EX1 |
| // Eg. ADD x0, x1, x2, LSL #2 |
| pipe_class ialu_reg_reg_shift(iRegI dst, iRegI src1, iRegI src2, immI shift) |
| %{ |
| single_instruction; |
| dst : EX2(write); |
| src1 : EX1(read); |
| src2 : ISS(read); |
| INS01 : ISS; |
| ALU : EX2; |
| %} |
| |
| // Integer ALU reg operation with constant shift |
| // Eg. LSL x0, x1, #shift |
| pipe_class ialu_reg_shift(iRegI dst, iRegI src1) |
| %{ |
| single_instruction; |
| dst : EX2(write); |
| src1 : ISS(read); |
| INS01 : ISS; |
| ALU : EX2; |
| %} |
| |
| // Integer ALU reg-reg operation with variable shift |
| // Both operands must be available in LATE_ISS instead of EX1 |
| // Result is available in EX1 instead of EX2 |
| // Eg. LSLV x0, x1, x2 |
| pipe_class ialu_reg_reg_vshift(iRegI dst, iRegI src1, iRegI src2) |
| %{ |
| single_instruction; |
| dst : EX1(write); |
| src1 : ISS(read); |
| src2 : ISS(read); |
| INS01 : ISS; |
| ALU : EX1; |
| %} |
| |
| // Integer ALU reg-reg operation with extract |
| // As for _vshift above, but result generated in EX2 |
| // Eg. EXTR x0, x1, x2, #N |
| pipe_class ialu_reg_reg_extr(iRegI dst, iRegI src1, iRegI src2) |
| %{ |
| single_instruction; |
| dst : EX2(write); |
| src1 : ISS(read); |
| src2 : ISS(read); |
| INS1 : ISS; // Can only dual issue as Instruction 1 |
| ALU : EX1; |
| %} |
| |
| // Integer ALU reg operation |
| // Eg. NEG x0, x1 |
| pipe_class ialu_reg(iRegI dst, iRegI src) |
| %{ |
| single_instruction; |
| dst : EX2(write); |
| src : EX1(read); |
| INS01 : ISS; |
| ALU : EX2; |
| %} |
| |
| // Integer ALU reg-immediate operation |
| // Eg. ADD x0, x1, #N |
| pipe_class ialu_reg_imm(iRegI dst, iRegI src1) |
| %{ |
| single_instruction; |
| dst : EX2(write); |
| src1 : EX1(read); |
| INS01 : ISS; |
| ALU : EX2; |
| %} |
| |
| // Integer ALU immediate operation (no source operands) |
| // Eg. MOV x0, #N |
| pipe_class ialu_imm(iRegI dst) |
| %{ |
| single_instruction; |
| dst : EX1(write); |
| INS01 : ISS; |
| ALU : EX1; |
| %} |
| |
| //------- Compare operation ------------------------------- |
| |
| // Compare reg-reg |
| // Eg. CMP x0, x1 |
| pipe_class icmp_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2) |
| %{ |
| single_instruction; |
| // fixed_latency(16); |
| cr : EX2(write); |
| op1 : EX1(read); |
| op2 : EX1(read); |
| INS01 : ISS; |
| ALU : EX2; |
| %} |
| |
| // Compare reg-immediate |
| // Eg. CMP x0, #N |
| pipe_class icmp_reg_imm(rFlagsReg cr, iRegI op1) |
| %{ |
| single_instruction; |
| // fixed_latency(16); |
| cr : EX2(write); |
| op1 : EX1(read); |
| INS01 : ISS; |
| ALU : EX2; |
| %} |
| |
| //------- Conditional instructions ------------------------ |
| |
| // Conditional no operands |
| // Eg. CSINC x0, zr, zr, <cond> |
| pipe_class icond_none(iRegI dst, rFlagsReg cr) |
| %{ |
| single_instruction; |
| cr : EX1(read); |
| dst : EX2(write); |
| INS01 : ISS; |
| ALU : EX2; |
| %} |
| |
| // Conditional 2 operand |
// Eg. CSEL x0, x1, x2, <cond>
| pipe_class icond_reg_reg(iRegI dst, iRegI src1, iRegI src2, rFlagsReg cr) |
| %{ |
| single_instruction; |
| cr : EX1(read); |
| src1 : EX1(read); |
| src2 : EX1(read); |
| dst : EX2(write); |
| INS01 : ISS; |
| ALU : EX2; |
| %} |
| |
// Conditional 1 operand
// Eg. CSNEG x0, x1, x1, <cond>
| pipe_class icond_reg(iRegI dst, iRegI src, rFlagsReg cr) |
| %{ |
| single_instruction; |
| cr : EX1(read); |
| src : EX1(read); |
| dst : EX2(write); |
| INS01 : ISS; |
| ALU : EX2; |
| %} |
| |
| //------- Multiply pipeline operations -------------------- |
| |
| // Multiply reg-reg |
| // Eg. MUL w0, w1, w2 |
| pipe_class imul_reg_reg(iRegI dst, iRegI src1, iRegI src2) |
| %{ |
| single_instruction; |
| dst : WR(write); |
| src1 : ISS(read); |
| src2 : ISS(read); |
| INS01 : ISS; |
| MAC : WR; |
| %} |
| |
| // Multiply accumulate |
| // Eg. MADD w0, w1, w2, w3 |
| pipe_class imac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) |
| %{ |
| single_instruction; |
| dst : WR(write); |
| src1 : ISS(read); |
| src2 : ISS(read); |
| src3 : ISS(read); |
| INS01 : ISS; |
| MAC : WR; |
| %} |
| |
// Multiply reg-reg (long)
// Eg. MUL x0, x1, x2
| pipe_class lmul_reg_reg(iRegI dst, iRegI src1, iRegI src2) |
| %{ |
| single_instruction; |
| fixed_latency(3); // Maximum latency for 64 bit mul |
| dst : WR(write); |
| src1 : ISS(read); |
| src2 : ISS(read); |
| INS01 : ISS; |
| MAC : WR; |
| %} |
| |
// Multiply accumulate (long)
// Eg. MADD x0, x1, x2, x3
| pipe_class lmac_reg_reg(iRegI dst, iRegI src1, iRegI src2, iRegI src3) |
| %{ |
| single_instruction; |
| fixed_latency(3); // Maximum latency for 64 bit mul |
| dst : WR(write); |
| src1 : ISS(read); |
| src2 : ISS(read); |
| src3 : ISS(read); |
| INS01 : ISS; |
| MAC : WR; |
| %} |
| |
| //------- Divide pipeline operations -------------------- |
| |
| // Eg. SDIV w0, w1, w2 |
| pipe_class idiv_reg_reg(iRegI dst, iRegI src1, iRegI src2) |
| %{ |
| single_instruction; |
| fixed_latency(8); // Maximum latency for 32 bit divide |
| dst : WR(write); |
| src1 : ISS(read); |
| src2 : ISS(read); |
| INS0 : ISS; // Can only dual issue as instruction 0 |
| DIV : WR; |
| %} |
| |
| // Eg. SDIV x0, x1, x2 |
| pipe_class ldiv_reg_reg(iRegI dst, iRegI src1, iRegI src2) |
| %{ |
| single_instruction; |
| fixed_latency(16); // Maximum latency for 64 bit divide |
| dst : WR(write); |
| src1 : ISS(read); |
| src2 : ISS(read); |
| INS0 : ISS; // Can only dual issue as instruction 0 |
| DIV : WR; |
| %} |
| |
| //------- Load pipeline operations ------------------------ |
| |
| // Load - prefetch |
// Eg. PRFM <mem>
| pipe_class iload_prefetch(memory mem) |
| %{ |
| single_instruction; |
| mem : ISS(read); |
| INS01 : ISS; |
| LDST : WR; |
| %} |
| |
| // Load - reg, mem |
| // Eg. LDR x0, <mem> |
| pipe_class iload_reg_mem(iRegI dst, memory mem) |
| %{ |
| single_instruction; |
| dst : WR(write); |
| mem : ISS(read); |
| INS01 : ISS; |
| LDST : WR; |
| %} |
| |
| // Load - reg, reg |
| // Eg. LDR x0, [sp, x1] |
| pipe_class iload_reg_reg(iRegI dst, iRegI src) |
| %{ |
| single_instruction; |
| dst : WR(write); |
| src : ISS(read); |
| INS01 : ISS; |
| LDST : WR; |
| %} |
| |
| //------- Store pipeline operations ----------------------- |
| |
| // Store - zr, mem |
| // Eg. STR zr, <mem> |
| pipe_class istore_mem(memory mem) |
| %{ |
| single_instruction; |
| mem : ISS(read); |
| INS01 : ISS; |
| LDST : WR; |
| %} |
| |
| // Store - reg, mem |
| // Eg. STR x0, <mem> |
| pipe_class istore_reg_mem(iRegI src, memory mem) |
| %{ |
| single_instruction; |
| mem : ISS(read); |
| src : EX2(read); |
| INS01 : ISS; |
| LDST : WR; |
| %} |
| |
| // Store - reg, reg |
| // Eg. STR x0, [sp, x1] |
| pipe_class istore_reg_reg(iRegI dst, iRegI src) |
| %{ |
| single_instruction; |
| dst : ISS(read); |
| src : EX2(read); |
| INS01 : ISS; |
| LDST : WR; |
| %} |
| |
//------- Branch pipeline operations ----------------------
| |
| // Branch |
| pipe_class pipe_branch() |
| %{ |
| single_instruction; |
| INS01 : ISS; |
| BRANCH : EX1; |
| %} |
| |
| // Conditional branch |
| pipe_class pipe_branch_cond(rFlagsReg cr) |
| %{ |
| single_instruction; |
| cr : EX1(read); |
| INS01 : ISS; |
| BRANCH : EX1; |
| %} |
| |
| // Compare & Branch |
// Eg. CBZ/CBNZ
| pipe_class pipe_cmp_branch(iRegI op1) |
| %{ |
| single_instruction; |
| op1 : EX1(read); |
| INS01 : ISS; |
| BRANCH : EX1; |
| %} |
| |
| //------- Synchronisation operations ---------------------- |
| |
| // Any operation requiring serialization. |
// Eg. DMB / atomic ops / load-acquire / store-release
| pipe_class pipe_serial() |
| %{ |
| single_instruction; |
| force_serialization; |
| fixed_latency(16); |
| INS01 : ISS(2); // Cannot dual issue with any other instruction |
| LDST : WR; |
| %} |
| |
| // Generic big/slow expanded idiom - also serialized |
| pipe_class pipe_slow() |
| %{ |
| instruction_count(10); |
| multiple_bundles; |
| force_serialization; |
| fixed_latency(16); |
| INS01 : ISS(2); // Cannot dual issue with any other instruction |
| LDST : WR; |
| %} |
| |
| // Empty pipeline class |
| pipe_class pipe_class_empty() |
| %{ |
| single_instruction; |
| fixed_latency(0); |
| %} |
| |
| // Default pipeline class. |
| pipe_class pipe_class_default() |
| %{ |
| single_instruction; |
| fixed_latency(2); |
| %} |
| |
| // Pipeline class for compares. |
| pipe_class pipe_class_compare() |
| %{ |
| single_instruction; |
| fixed_latency(16); |
| %} |
| |
| // Pipeline class for memory operations. |
| pipe_class pipe_class_memory() |
| %{ |
| single_instruction; |
| fixed_latency(16); |
| %} |
| |
| // Pipeline class for call. |
| pipe_class pipe_class_call() |
| %{ |
| single_instruction; |
| fixed_latency(100); |
| %} |
| |
| // Define the class for the Nop node. |
| define %{ |
| MachNop = pipe_class_empty; |
| %} |
| |
| %} |
| //----------INSTRUCTIONS------------------------------------------------------- |
| // |
| // match -- States which machine-independent subtree may be replaced |
| // by this instruction. |
| // ins_cost -- The estimated cost of this instruction is used by instruction |
| // selection to identify a minimum cost tree of machine |
| // instructions that matches a tree of machine-independent |
| // instructions. |
| // format -- A string providing the disassembly for this instruction. |
| // The value of an instruction's operand may be inserted |
| // by referring to it with a '$' prefix. |
| // opcode -- Three instruction opcodes may be provided. These are referred |
| // to within an encode class as $primary, $secondary, and $tertiary |
// respectively. The primary opcode is commonly used to
| // indicate the type of machine instruction, while secondary |
| // and tertiary are often used for prefix options or addressing |
| // modes. |
| // ins_encode -- A list of encode classes with parameters. The encode class |
| // name must have been defined in an 'enc_class' specification |
| // in the encode section of the architecture description. |
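
// As an illustration, in the first rule below (loadB) the match clause
// claims the 8-bit signed LoadB subtree, ins_cost steers selection
// between competing rules, the format string is what appears in
// disassembly listings (e.g. with -XX:+PrintOptoAssembly), and
// ins_encode names the enc_class that emits the ldrsbw instruction.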
| |
| // ============================================================================ |
| // Memory (Load/Store) Instructions |
| |
| // Load Instructions |
| |
| // Load Byte (8 bit signed) |
| instruct loadB(iRegINoSp dst, memory mem) |
| %{ |
| match(Set dst (LoadB mem)); |
| predicate(!needs_acquiring_load(n)); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrsbw $dst, $mem\t# byte" %} |
| |
| ins_encode(aarch64_enc_ldrsbw(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Byte (8 bit signed) into long |
| instruct loadB2L(iRegLNoSp dst, memory mem) |
| %{ |
| match(Set dst (ConvI2L (LoadB mem))); |
| predicate(!needs_acquiring_load(n->in(1))); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrsb $dst, $mem\t# byte" %} |
| |
| ins_encode(aarch64_enc_ldrsb(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Byte (8 bit unsigned) |
| instruct loadUB(iRegINoSp dst, memory mem) |
| %{ |
| match(Set dst (LoadUB mem)); |
| predicate(!needs_acquiring_load(n)); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrbw $dst, $mem\t# byte" %} |
| |
| ins_encode(aarch64_enc_ldrb(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Byte (8 bit unsigned) into long |
| instruct loadUB2L(iRegLNoSp dst, memory mem) |
| %{ |
| match(Set dst (ConvI2L (LoadUB mem))); |
| predicate(!needs_acquiring_load(n->in(1))); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrb $dst, $mem\t# byte" %} |
| |
| ins_encode(aarch64_enc_ldrb(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Short (16 bit signed) |
| instruct loadS(iRegINoSp dst, memory mem) |
| %{ |
| match(Set dst (LoadS mem)); |
| predicate(!needs_acquiring_load(n)); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrshw $dst, $mem\t# short" %} |
| |
| ins_encode(aarch64_enc_ldrshw(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Short (16 bit signed) into long |
| instruct loadS2L(iRegLNoSp dst, memory mem) |
| %{ |
| match(Set dst (ConvI2L (LoadS mem))); |
| predicate(!needs_acquiring_load(n->in(1))); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrsh $dst, $mem\t# short" %} |
| |
| ins_encode(aarch64_enc_ldrsh(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Char (16 bit unsigned) |
| instruct loadUS(iRegINoSp dst, memory mem) |
| %{ |
| match(Set dst (LoadUS mem)); |
| predicate(!needs_acquiring_load(n)); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrh $dst, $mem\t# short" %} |
| |
| ins_encode(aarch64_enc_ldrh(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Short/Char (16 bit unsigned) into long |
| instruct loadUS2L(iRegLNoSp dst, memory mem) |
| %{ |
| match(Set dst (ConvI2L (LoadUS mem))); |
| predicate(!needs_acquiring_load(n->in(1))); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrh $dst, $mem\t# short" %} |
| |
| ins_encode(aarch64_enc_ldrh(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Integer (32 bit signed) |
| instruct loadI(iRegINoSp dst, memory mem) |
| %{ |
| match(Set dst (LoadI mem)); |
| predicate(!needs_acquiring_load(n)); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrw $dst, $mem\t# int" %} |
| |
| ins_encode(aarch64_enc_ldrw(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Integer (32 bit signed) into long |
| instruct loadI2L(iRegLNoSp dst, memory mem) |
| %{ |
| match(Set dst (ConvI2L (LoadI mem))); |
| predicate(!needs_acquiring_load(n->in(1))); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrsw $dst, $mem\t# int" %} |
| |
| ins_encode(aarch64_enc_ldrsw(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Integer (32 bit unsigned) into long |
| instruct loadUI2L(iRegLNoSp dst, memory mem, immL_32bits mask) |
| %{ |
| match(Set dst (AndL (ConvI2L (LoadI mem)) mask)); |
| predicate(!needs_acquiring_load(n->in(1)->in(1)->as_Load())); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrw $dst, $mem\t# int" %} |
| |
| ins_encode(aarch64_enc_ldrw(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Long (64 bit signed) |
| instruct loadL(iRegLNoSp dst, memory mem) |
| %{ |
| match(Set dst (LoadL mem)); |
| predicate(!needs_acquiring_load(n)); |
| |
| ins_cost(4 * INSN_COST); |
  format %{ "ldr $dst, $mem\t# long" %}
| |
| ins_encode(aarch64_enc_ldr(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Range |
| instruct loadRange(iRegINoSp dst, memory mem) |
| %{ |
| match(Set dst (LoadRange mem)); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrw $dst, $mem\t# range" %} |
| |
| ins_encode(aarch64_enc_ldrw(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Pointer |
| instruct loadP(iRegPNoSp dst, memory mem) |
| %{ |
| match(Set dst (LoadP mem)); |
| predicate(!needs_acquiring_load(n)); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldr $dst, $mem\t# ptr" %} |
| |
| ins_encode(aarch64_enc_ldr(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Compressed Pointer |
| instruct loadN(iRegNNoSp dst, memory mem) |
| %{ |
| match(Set dst (LoadN mem)); |
| predicate(!needs_acquiring_load(n)); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrw $dst, $mem\t# compressed ptr" %} |
| |
| ins_encode(aarch64_enc_ldrw(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Klass Pointer |
| instruct loadKlass(iRegPNoSp dst, memory mem) |
| %{ |
| match(Set dst (LoadKlass mem)); |
| predicate(!needs_acquiring_load(n)); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldr $dst, $mem\t# class" %} |
| |
| ins_encode(aarch64_enc_ldr(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Narrow Klass Pointer |
| instruct loadNKlass(iRegNNoSp dst, memory mem) |
| %{ |
| match(Set dst (LoadNKlass mem)); |
| predicate(!needs_acquiring_load(n)); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrw $dst, $mem\t# compressed class ptr" %} |
| |
| ins_encode(aarch64_enc_ldrw(dst, mem)); |
| |
| ins_pipe(iload_reg_mem); |
| %} |
| |
| // Load Float |
| instruct loadF(vRegF dst, memory mem) |
| %{ |
| match(Set dst (LoadF mem)); |
| predicate(!needs_acquiring_load(n)); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrs $dst, $mem\t# float" %} |
| |
| ins_encode( aarch64_enc_ldrs(dst, mem) ); |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // Load Double |
| instruct loadD(vRegD dst, memory mem) |
| %{ |
| match(Set dst (LoadD mem)); |
| predicate(!needs_acquiring_load(n)); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrd $dst, $mem\t# double" %} |
| |
| ins_encode( aarch64_enc_ldrd(dst, mem) ); |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| |
| // Load Int Constant |
| instruct loadConI(iRegINoSp dst, immI src) |
| %{ |
| match(Set dst src); |
| |
| ins_cost(INSN_COST); |
| format %{ "mov $dst, $src\t# int" %} |
| |
| ins_encode( aarch64_enc_movw_imm(dst, src) ); |
| |
| ins_pipe(ialu_imm); |
| %} |
| |
| // Load Long Constant |
| instruct loadConL(iRegLNoSp dst, immL src) |
| %{ |
| match(Set dst src); |
| |
| ins_cost(INSN_COST); |
| format %{ "mov $dst, $src\t# long" %} |
| |
| ins_encode( aarch64_enc_mov_imm(dst, src) ); |
| |
| ins_pipe(ialu_imm); |
| %} |
| |
| // Load Pointer Constant |
| |
| instruct loadConP(iRegPNoSp dst, immP con) |
| %{ |
| match(Set dst con); |
| |
| ins_cost(INSN_COST * 4); |
| format %{ |
| "mov $dst, $con\t# ptr\n\t" |
| %} |
| |
| ins_encode(aarch64_enc_mov_p(dst, con)); |
| |
| ins_pipe(ialu_imm); |
| %} |
| |
| // Load Null Pointer Constant |
| |
| instruct loadConP0(iRegPNoSp dst, immP0 con) |
| %{ |
| match(Set dst con); |
| |
| ins_cost(INSN_COST); |
| format %{ "mov $dst, $con\t# NULL ptr" %} |
| |
| ins_encode(aarch64_enc_mov_p0(dst, con)); |
| |
| ins_pipe(ialu_imm); |
| %} |
| |
| // Load Pointer Constant One |
| |
| instruct loadConP1(iRegPNoSp dst, immP_1 con) |
| %{ |
| match(Set dst con); |
| |
| ins_cost(INSN_COST); |
  format %{ "mov $dst, $con\t# ptr 1" %}
| |
| ins_encode(aarch64_enc_mov_p1(dst, con)); |
| |
| ins_pipe(ialu_imm); |
| %} |
| |
| // Load Poll Page Constant |
| |
| instruct loadConPollPage(iRegPNoSp dst, immPollPage con) |
| %{ |
| match(Set dst con); |
| |
| ins_cost(INSN_COST); |
| format %{ "adr $dst, $con\t# Poll Page Ptr" %} |
| |
| ins_encode(aarch64_enc_mov_poll_page(dst, con)); |
| |
| ins_pipe(ialu_imm); |
| %} |
| |
| // Load Byte Map Base Constant |
| |
| instruct loadByteMapBase(iRegPNoSp dst, immByteMapBase con) |
| %{ |
| match(Set dst con); |
| |
| ins_cost(INSN_COST); |
| format %{ "adr $dst, $con\t# Byte Map Base" %} |
| |
| ins_encode(aarch64_enc_mov_byte_map_base(dst, con)); |
| |
| ins_pipe(ialu_imm); |
| %} |
| |
| // Load Narrow Pointer Constant |
| |
| instruct loadConN(iRegNNoSp dst, immN con) |
| %{ |
| match(Set dst con); |
| |
| ins_cost(INSN_COST * 4); |
| format %{ "mov $dst, $con\t# compressed ptr" %} |
| |
| ins_encode(aarch64_enc_mov_n(dst, con)); |
| |
| ins_pipe(ialu_imm); |
| %} |
| |
| // Load Narrow Null Pointer Constant |
| |
| instruct loadConN0(iRegNNoSp dst, immN0 con) |
| %{ |
| match(Set dst con); |
| |
| ins_cost(INSN_COST); |
| format %{ "mov $dst, $con\t# compressed NULL ptr" %} |
| |
| ins_encode(aarch64_enc_mov_n0(dst, con)); |
| |
| ins_pipe(ialu_imm); |
| %} |
| |
| // Load Narrow Klass Constant |
| |
| instruct loadConNKlass(iRegNNoSp dst, immNKlass con) |
| %{ |
| match(Set dst con); |
| |
| ins_cost(INSN_COST); |
| format %{ "mov $dst, $con\t# compressed klass ptr" %} |
| |
| ins_encode(aarch64_enc_mov_nk(dst, con)); |
| |
| ins_pipe(ialu_imm); |
| %} |
| |
| // Load Packed Float Constant |
| |
| instruct loadConF_packed(vRegF dst, immFPacked con) %{ |
| match(Set dst con); |
| ins_cost(INSN_COST * 4); |
| format %{ "fmovs $dst, $con"%} |
| ins_encode %{ |
| __ fmovs(as_FloatRegister($dst$$reg), (double)$con$$constant); |
| %} |
| |
| ins_pipe(fp_imm_s); |
| %} |
| |
| // Load Float Constant |
| |
| instruct loadConF(vRegF dst, immF con) %{ |
| match(Set dst con); |
| |
| ins_cost(INSN_COST * 4); |
| |
| format %{ |
| "ldrs $dst, [$constantaddress]\t# load from constant table: float=$con\n\t" |
| %} |
| |
| ins_encode %{ |
| __ ldrs(as_FloatRegister($dst$$reg), $constantaddress($con)); |
| %} |
| |
| ins_pipe(fp_load_constant_s); |
| %} |
| |
| // Load Packed Double Constant |
| |
| instruct loadConD_packed(vRegD dst, immDPacked con) %{ |
| match(Set dst con); |
| ins_cost(INSN_COST); |
| format %{ "fmovd $dst, $con"%} |
| ins_encode %{ |
| __ fmovd(as_FloatRegister($dst$$reg), $con$$constant); |
| %} |
| |
| ins_pipe(fp_imm_d); |
| %} |
| |
| // Load Double Constant |
| |
| instruct loadConD(vRegD dst, immD con) %{ |
| match(Set dst con); |
| |
| ins_cost(INSN_COST * 5); |
  format %{
    "ldrd $dst, [$constantaddress]\t# load from constant table: double=$con\n\t"
  %}
| |
| ins_encode %{ |
| __ ldrd(as_FloatRegister($dst$$reg), $constantaddress($con)); |
| %} |
| |
| ins_pipe(fp_load_constant_d); |
| %} |
| |
| // Store Instructions |
| |
| // Store CMS card-mark Immediate |
| instruct storeimmCM0(immI0 zero, memory mem) |
| %{ |
| match(Set mem (StoreCM mem zero)); |
| predicate(unnecessary_storestore(n)); |
| |
| ins_cost(INSN_COST); |
| format %{ "strb zr, $mem\t# byte" %} |
| |
| ins_encode(aarch64_enc_strb0(mem)); |
| |
| ins_pipe(istore_mem); |
| %} |
| |
| // Store CMS card-mark Immediate with intervening StoreStore |
| // needed when using CMS with no conditional card marking |
| instruct storeimmCM0_ordered(immI0 zero, memory mem) |
| %{ |
| match(Set mem (StoreCM mem zero)); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "dmb ishst" |
| "\n\tstrb zr, $mem\t# byte" %} |
| |
| ins_encode(aarch64_enc_strb0_ordered(mem)); |
| |
| ins_pipe(istore_mem); |
| %} |
| |
| // Store Byte |
| instruct storeB(iRegIorL2I src, memory mem) |
| %{ |
| match(Set mem (StoreB mem src)); |
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
| format %{ "strb $src, $mem\t# byte" %} |
| |
| ins_encode(aarch64_enc_strb(src, mem)); |
| |
| ins_pipe(istore_reg_mem); |
| %} |
| |
| |
| instruct storeimmB0(immI0 zero, memory mem) |
| %{ |
| match(Set mem (StoreB mem zero)); |
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
  format %{ "strb zr, $mem\t# byte" %}
| |
| ins_encode(aarch64_enc_strb0(mem)); |
| |
| ins_pipe(istore_mem); |
| %} |
| |
| // Store Char/Short |
| instruct storeC(iRegIorL2I src, memory mem) |
| %{ |
| match(Set mem (StoreC mem src)); |
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
| format %{ "strh $src, $mem\t# short" %} |
| |
| ins_encode(aarch64_enc_strh(src, mem)); |
| |
| ins_pipe(istore_reg_mem); |
| %} |
| |
| instruct storeimmC0(immI0 zero, memory mem) |
| %{ |
| match(Set mem (StoreC mem zero)); |
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
| format %{ "strh zr, $mem\t# short" %} |
| |
| ins_encode(aarch64_enc_strh0(mem)); |
| |
| ins_pipe(istore_mem); |
| %} |
| |
| // Store Integer |
| |
| instruct storeI(iRegIorL2I src, memory mem) |
| %{ |
  match(Set mem (StoreI mem src));
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
| format %{ "strw $src, $mem\t# int" %} |
| |
| ins_encode(aarch64_enc_strw(src, mem)); |
| |
| ins_pipe(istore_reg_mem); |
| %} |
| |
| instruct storeimmI0(immI0 zero, memory mem) |
| %{ |
  match(Set mem (StoreI mem zero));
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
| format %{ "strw zr, $mem\t# int" %} |
| |
| ins_encode(aarch64_enc_strw0(mem)); |
| |
| ins_pipe(istore_mem); |
| %} |
| |
| // Store Long (64 bit signed) |
| instruct storeL(iRegL src, memory mem) |
| %{ |
| match(Set mem (StoreL mem src)); |
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
  format %{ "str $src, $mem\t# long" %}
| |
| ins_encode(aarch64_enc_str(src, mem)); |
| |
| ins_pipe(istore_reg_mem); |
| %} |
| |
// Store Long Zero (64 bit)
| instruct storeimmL0(immL0 zero, memory mem) |
| %{ |
| match(Set mem (StoreL mem zero)); |
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
  format %{ "str zr, $mem\t# long" %}
| |
| ins_encode(aarch64_enc_str0(mem)); |
| |
| ins_pipe(istore_mem); |
| %} |
| |
| // Store Pointer |
| instruct storeP(iRegP src, memory mem) |
| %{ |
| match(Set mem (StoreP mem src)); |
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
| format %{ "str $src, $mem\t# ptr" %} |
| |
| ins_encode(aarch64_enc_str(src, mem)); |
| |
| ins_pipe(istore_reg_mem); |
| %} |
| |
| // Store Pointer |
| instruct storeimmP0(immP0 zero, memory mem) |
| %{ |
| match(Set mem (StoreP mem zero)); |
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
| format %{ "str zr, $mem\t# ptr" %} |
| |
| ins_encode(aarch64_enc_str0(mem)); |
| |
| ins_pipe(istore_mem); |
| %} |
| |
| // Store Compressed Pointer |
| instruct storeN(iRegN src, memory mem) |
| %{ |
| match(Set mem (StoreN mem src)); |
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
| format %{ "strw $src, $mem\t# compressed ptr" %} |
| |
| ins_encode(aarch64_enc_strw(src, mem)); |
| |
| ins_pipe(istore_reg_mem); |
| %} |
| |
| instruct storeImmN0(iRegIHeapbase heapbase, immN0 zero, memory mem) |
| %{ |
| match(Set mem (StoreN mem zero)); |
| predicate(Universe::narrow_oop_base() == NULL && |
| Universe::narrow_klass_base() == NULL && |
| (!needs_releasing_store(n))); |
| |
| ins_cost(INSN_COST); |
| format %{ "strw rheapbase, $mem\t# compressed ptr (rheapbase==0)" %} |
| |
| ins_encode(aarch64_enc_strw(heapbase, mem)); |
| |
| ins_pipe(istore_reg_mem); |
| %} |
| |
| // Store Float |
| instruct storeF(vRegF src, memory mem) |
| %{ |
| match(Set mem (StoreF mem src)); |
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
| format %{ "strs $src, $mem\t# float" %} |
| |
| ins_encode( aarch64_enc_strs(src, mem) ); |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // TODO |
| // implement storeImmF0 and storeFImmPacked |
| |
| // Store Double |
| instruct storeD(vRegD src, memory mem) |
| %{ |
| match(Set mem (StoreD mem src)); |
| predicate(!needs_releasing_store(n)); |
| |
| ins_cost(INSN_COST); |
| format %{ "strd $src, $mem\t# double" %} |
| |
| ins_encode( aarch64_enc_strd(src, mem) ); |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // Store Compressed Klass Pointer |
| instruct storeNKlass(iRegN src, memory mem) |
| %{ |
| predicate(!needs_releasing_store(n)); |
| match(Set mem (StoreNKlass mem src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "strw $src, $mem\t# compressed klass ptr" %} |
| |
| ins_encode(aarch64_enc_strw(src, mem)); |
| |
| ins_pipe(istore_reg_mem); |
| %} |
| |
| // TODO |
| // implement storeImmD0 and storeDImmPacked |
| |
| // prefetch instructions |
| // Must be safe to execute with invalid address (cannot fault). |
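// (PRFM is architecturally a prefetch hint and never faults, which is
// what makes it safe here.)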
| |
| instruct prefetchalloc( memory mem ) %{ |
| match(PrefetchAllocation mem); |
| |
| ins_cost(INSN_COST); |
| format %{ "prfm $mem, PSTL1KEEP\t# Prefetch into level 1 cache write keep" %} |
| |
| ins_encode( aarch64_enc_prefetchw(mem) ); |
| |
| ins_pipe(iload_prefetch); |
| %} |
| |
| // ---------------- volatile loads and stores ---------------- |
| |
| // Load Byte (8 bit signed) |
| instruct loadB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (LoadB mem)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldarsb $dst, $mem\t# byte" %} |
| |
| ins_encode(aarch64_enc_ldarsb(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Load Byte (8 bit signed) into long |
| instruct loadB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (ConvI2L (LoadB mem))); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldarsb $dst, $mem\t# byte" %} |
| |
| ins_encode(aarch64_enc_ldarsb(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Load Byte (8 bit unsigned) |
| instruct loadUB_volatile(iRegINoSp dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (LoadUB mem)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldarb $dst, $mem\t# byte" %} |
| |
| ins_encode(aarch64_enc_ldarb(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Load Byte (8 bit unsigned) into long |
| instruct loadUB2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (ConvI2L (LoadUB mem))); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldarb $dst, $mem\t# byte" %} |
| |
| ins_encode(aarch64_enc_ldarb(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Load Short (16 bit signed) |
| instruct loadS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (LoadS mem)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldarshw $dst, $mem\t# short" %} |
| |
| ins_encode(aarch64_enc_ldarshw(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
// Load Char (16 bit unsigned)
instruct loadUS_volatile(iRegINoSp dst, /* sync_memory*/indirect mem)
| %{ |
| match(Set dst (LoadUS mem)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldarhw $dst, $mem\t# short" %} |
| |
| ins_encode(aarch64_enc_ldarhw(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Load Short/Char (16 bit unsigned) into long |
| instruct loadUS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (ConvI2L (LoadUS mem))); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldarh $dst, $mem\t# short" %} |
| |
| ins_encode(aarch64_enc_ldarh(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
// Load Short (16 bit signed) into long
| instruct loadS2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (ConvI2L (LoadS mem))); |
| |
| ins_cost(VOLATILE_REF_COST); |
  format %{ "ldarsh $dst, $mem\t# short" %}
| |
| ins_encode(aarch64_enc_ldarsh(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Load Integer (32 bit signed) |
| instruct loadI_volatile(iRegINoSp dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (LoadI mem)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldarw $dst, $mem\t# int" %} |
| |
| ins_encode(aarch64_enc_ldarw(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Load Integer (32 bit unsigned) into long |
| instruct loadUI2L_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem, immL_32bits mask) |
| %{ |
| match(Set dst (AndL (ConvI2L (LoadI mem)) mask)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldarw $dst, $mem\t# int" %} |
| |
| ins_encode(aarch64_enc_ldarw(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Load Long (64 bit signed) |
| instruct loadL_volatile(iRegLNoSp dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (LoadL mem)); |
| |
| ins_cost(VOLATILE_REF_COST); |
  format %{ "ldar $dst, $mem\t# long" %}
| |
| ins_encode(aarch64_enc_ldar(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Load Pointer |
| instruct loadP_volatile(iRegPNoSp dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (LoadP mem)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldar $dst, $mem\t# ptr" %} |
| |
| ins_encode(aarch64_enc_ldar(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Load Compressed Pointer |
| instruct loadN_volatile(iRegNNoSp dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (LoadN mem)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldarw $dst, $mem\t# compressed ptr" %} |
| |
| ins_encode(aarch64_enc_ldarw(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Load Float |
| instruct loadF_volatile(vRegF dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (LoadF mem)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldars $dst, $mem\t# float" %} |
| |
| ins_encode( aarch64_enc_fldars(dst, mem) ); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Load Double |
| instruct loadD_volatile(vRegD dst, /* sync_memory*/indirect mem) |
| %{ |
| match(Set dst (LoadD mem)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "ldard $dst, $mem\t# double" %} |
| |
| ins_encode( aarch64_enc_fldard(dst, mem) ); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Store Byte |
| instruct storeB_volatile(iRegIorL2I src, /* sync_memory*/indirect mem) |
| %{ |
| match(Set mem (StoreB mem src)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "stlrb $src, $mem\t# byte" %} |
| |
| ins_encode(aarch64_enc_stlrb(src, mem)); |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // Store Char/Short |
| instruct storeC_volatile(iRegIorL2I src, /* sync_memory*/indirect mem) |
| %{ |
| match(Set mem (StoreC mem src)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "stlrh $src, $mem\t# short" %} |
| |
| ins_encode(aarch64_enc_stlrh(src, mem)); |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // Store Integer |
| |
| instruct storeI_volatile(iRegIorL2I src, /* sync_memory*/indirect mem) |
| %{ |
  match(Set mem (StoreI mem src));
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "stlrw $src, $mem\t# int" %} |
| |
| ins_encode(aarch64_enc_stlrw(src, mem)); |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // Store Long (64 bit signed) |
| instruct storeL_volatile(iRegL src, /* sync_memory*/indirect mem) |
| %{ |
| match(Set mem (StoreL mem src)); |
| |
| ins_cost(VOLATILE_REF_COST); |
  format %{ "stlr $src, $mem\t# long" %}
| |
| ins_encode(aarch64_enc_stlr(src, mem)); |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // Store Pointer |
| instruct storeP_volatile(iRegP src, /* sync_memory*/indirect mem) |
| %{ |
| match(Set mem (StoreP mem src)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "stlr $src, $mem\t# ptr" %} |
| |
| ins_encode(aarch64_enc_stlr(src, mem)); |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // Store Compressed Pointer |
| instruct storeN_volatile(iRegN src, /* sync_memory*/indirect mem) |
| %{ |
| match(Set mem (StoreN mem src)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "stlrw $src, $mem\t# compressed ptr" %} |
| |
| ins_encode(aarch64_enc_stlrw(src, mem)); |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // Store Float |
| instruct storeF_volatile(vRegF src, /* sync_memory*/indirect mem) |
| %{ |
| match(Set mem (StoreF mem src)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "stlrs $src, $mem\t# float" %} |
| |
| ins_encode( aarch64_enc_fstlrs(src, mem) ); |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // TODO |
| // implement storeImmF0 and storeFImmPacked |
| |
| // Store Double |
| instruct storeD_volatile(vRegD src, /* sync_memory*/indirect mem) |
| %{ |
| match(Set mem (StoreD mem src)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| format %{ "stlrd $src, $mem\t# double" %} |
| |
| ins_encode( aarch64_enc_fstlrd(src, mem) ); |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // ---------------- end of volatile loads and stores ---------------- |
| |
| // ============================================================================ |
| // BSWAP Instructions |
| |
| instruct bytes_reverse_int(iRegINoSp dst, iRegIorL2I src) %{ |
| match(Set dst (ReverseBytesI src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "revw $dst, $src" %} |
| |
| ins_encode %{ |
| __ revw(as_Register($dst$$reg), as_Register($src$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct bytes_reverse_long(iRegLNoSp dst, iRegL src) %{ |
| match(Set dst (ReverseBytesL src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "rev $dst, $src" %} |
| |
| ins_encode %{ |
| __ rev(as_Register($dst$$reg), as_Register($src$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct bytes_reverse_unsigned_short(iRegINoSp dst, iRegIorL2I src) %{ |
| match(Set dst (ReverseBytesUS src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "rev16w $dst, $src" %} |
| |
| ins_encode %{ |
| __ rev16w(as_Register($dst$$reg), as_Register($src$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct bytes_reverse_short(iRegINoSp dst, iRegIorL2I src) %{ |
| match(Set dst (ReverseBytesS src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "rev16w $dst, $src\n\t" |
| "sbfmw $dst, $dst, #0, #15" %} |
| |
| ins_encode %{ |
| __ rev16w(as_Register($dst$$reg), as_Register($src$$reg)); |
| __ sbfmw(as_Register($dst$$reg), as_Register($dst$$reg), 0U, 15U); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| // ============================================================================ |
| // Zero Count Instructions |
| |
| instruct countLeadingZerosI(iRegINoSp dst, iRegIorL2I src) %{ |
| match(Set dst (CountLeadingZerosI src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "clzw $dst, $src" %} |
| ins_encode %{ |
| __ clzw(as_Register($dst$$reg), as_Register($src$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct countLeadingZerosL(iRegINoSp dst, iRegL src) %{ |
| match(Set dst (CountLeadingZerosL src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "clz $dst, $src" %} |
| ins_encode %{ |
| __ clz(as_Register($dst$$reg), as_Register($src$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct countTrailingZerosI(iRegINoSp dst, iRegIorL2I src) %{ |
| match(Set dst (CountTrailingZerosI src)); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "rbitw $dst, $src\n\t" |
| "clzw $dst, $dst" %} |
| ins_encode %{ |
| __ rbitw(as_Register($dst$$reg), as_Register($src$$reg)); |
| __ clzw(as_Register($dst$$reg), as_Register($dst$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct countTrailingZerosL(iRegINoSp dst, iRegL src) %{ |
| match(Set dst (CountTrailingZerosL src)); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "rbit $dst, $src\n\t" |
| "clz $dst, $dst" %} |
| ins_encode %{ |
| __ rbit(as_Register($dst$$reg), as_Register($src$$reg)); |
| __ clz(as_Register($dst$$reg), as_Register($dst$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| //---------- Population Count Instructions ------------------------------------- |
| // |
| |
| instruct popCountI(iRegINoSp dst, iRegIorL2I src, vRegF tmp) %{ |
| predicate(UsePopCountInstruction); |
| match(Set dst (PopCountI src)); |
| effect(TEMP tmp); |
| ins_cost(INSN_COST * 13); |
| |
| format %{ "movw $src, $src\n\t" |
| "mov $tmp, $src\t# vector (1D)\n\t" |
| "cnt $tmp, $tmp\t# vector (8B)\n\t" |
| "addv $tmp, $tmp\t# vector (8B)\n\t" |
| "mov $dst, $tmp\t# vector (1D)" %} |
| ins_encode %{ |
| __ movw($src$$Register, $src$$Register); // ensure top 32 bits 0 |
| __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register); |
| __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister); |
| __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister); |
| __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| instruct popCountI_mem(iRegINoSp dst, memory mem, vRegF tmp) %{ |
| predicate(UsePopCountInstruction); |
| match(Set dst (PopCountI (LoadI mem))); |
| effect(TEMP tmp); |
| ins_cost(INSN_COST * 13); |
| |
| format %{ "ldrs $tmp, $mem\n\t" |
| "cnt $tmp, $tmp\t# vector (8B)\n\t" |
| "addv $tmp, $tmp\t# vector (8B)\n\t" |
| "mov $dst, $tmp\t# vector (1D)" %} |
| ins_encode %{ |
| FloatRegister tmp_reg = as_FloatRegister($tmp$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrs, tmp_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister); |
| __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister); |
| __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| // Note: Long.bitCount(long) returns an int. |
| instruct popCountL(iRegINoSp dst, iRegL src, vRegD tmp) %{ |
| predicate(UsePopCountInstruction); |
| match(Set dst (PopCountL src)); |
| effect(TEMP tmp); |
| ins_cost(INSN_COST * 13); |
| |
| format %{ "mov $tmp, $src\t# vector (1D)\n\t" |
| "cnt $tmp, $tmp\t# vector (8B)\n\t" |
| "addv $tmp, $tmp\t# vector (8B)\n\t" |
| "mov $dst, $tmp\t# vector (1D)" %} |
| ins_encode %{ |
| __ mov($tmp$$FloatRegister, __ T1D, 0, $src$$Register); |
| __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister); |
| __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister); |
| __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| instruct popCountL_mem(iRegINoSp dst, memory mem, vRegD tmp) %{ |
| predicate(UsePopCountInstruction); |
| match(Set dst (PopCountL (LoadL mem))); |
| effect(TEMP tmp); |
| ins_cost(INSN_COST * 13); |
| |
| format %{ "ldrd $tmp, $mem\n\t" |
| "cnt $tmp, $tmp\t# vector (8B)\n\t" |
| "addv $tmp, $tmp\t# vector (8B)\n\t" |
| "mov $dst, $tmp\t# vector (1D)" %} |
| ins_encode %{ |
| FloatRegister tmp_reg = as_FloatRegister($tmp$$reg); |
| loadStore(MacroAssembler(&cbuf), &MacroAssembler::ldrd, tmp_reg, $mem->opcode(), |
| as_Register($mem$$base), $mem$$index, $mem$$scale, $mem$$disp); |
| __ cnt($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister); |
| __ addv($tmp$$FloatRegister, __ T8B, $tmp$$FloatRegister); |
| __ mov($dst$$Register, $tmp$$FloatRegister, __ T1D, 0); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| // ============================================================================ |
| // MemBar Instruction |
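
// As encoded below: an acquire barrier is LoadLoad|LoadStore, a release
// barrier is LoadStore|StoreStore and a full volatile barrier is
// StoreLoad. Barriers that are redundant because the neighbouring access
// already used a ldar/stlr form match the unnecessary_membar_* rules and
// emit nothing but a block comment.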
| |
| instruct load_fence() %{ |
| match(LoadFence); |
| ins_cost(VOLATILE_REF_COST); |
| |
| format %{ "load_fence" %} |
| |
| ins_encode %{ |
| __ membar(Assembler::LoadLoad|Assembler::LoadStore); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct unnecessary_membar_acquire() %{ |
| predicate(unnecessary_acquire(n)); |
| match(MemBarAcquire); |
| ins_cost(0); |
| |
| format %{ "membar_acquire (elided)" %} |
| |
| ins_encode %{ |
| __ block_comment("membar_acquire (elided)"); |
| %} |
| |
| ins_pipe(pipe_class_empty); |
| %} |
| |
| instruct membar_acquire() %{ |
| match(MemBarAcquire); |
| ins_cost(VOLATILE_REF_COST); |
| |
| format %{ "membar_acquire" %} |
| |
| ins_encode %{ |
| __ block_comment("membar_acquire"); |
| __ membar(Assembler::LoadLoad|Assembler::LoadStore); |
| %} |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| |
| instruct membar_acquire_lock() %{ |
| match(MemBarAcquireLock); |
| ins_cost(VOLATILE_REF_COST); |
| |
| format %{ "membar_acquire_lock (elided)" %} |
| |
| ins_encode %{ |
| __ block_comment("membar_acquire_lock (elided)"); |
| %} |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct store_fence() %{ |
| match(StoreFence); |
| ins_cost(VOLATILE_REF_COST); |
| |
| format %{ "store_fence" %} |
| |
| ins_encode %{ |
| __ membar(Assembler::LoadStore|Assembler::StoreStore); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct unnecessary_membar_release() %{ |
| predicate(unnecessary_release(n)); |
| match(MemBarRelease); |
| ins_cost(0); |
| |
| format %{ "membar_release (elided)" %} |
| |
| ins_encode %{ |
| __ block_comment("membar_release (elided)"); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct membar_release() %{ |
| match(MemBarRelease); |
| ins_cost(VOLATILE_REF_COST); |
| |
| format %{ "membar_release" %} |
| |
| ins_encode %{ |
| __ block_comment("membar_release"); |
| __ membar(Assembler::LoadStore|Assembler::StoreStore); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct membar_storestore() %{ |
| match(MemBarStoreStore); |
| ins_cost(VOLATILE_REF_COST); |
| |
| format %{ "MEMBAR-store-store" %} |
| |
| ins_encode %{ |
| __ membar(Assembler::StoreStore); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct membar_release_lock() %{ |
| match(MemBarReleaseLock); |
| ins_cost(VOLATILE_REF_COST); |
| |
| format %{ "membar_release_lock (elided)" %} |
| |
| ins_encode %{ |
| __ block_comment("membar_release_lock (elided)"); |
| %} |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct unnecessary_membar_volatile() %{ |
| predicate(unnecessary_volatile(n)); |
| match(MemBarVolatile); |
| ins_cost(0); |
| |
| format %{ "membar_volatile (elided)" %} |
| |
| ins_encode %{ |
| __ block_comment("membar_volatile (elided)"); |
| %} |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct membar_volatile() %{ |
| match(MemBarVolatile); |
| ins_cost(VOLATILE_REF_COST*100); |
| |
| format %{ "membar_volatile" %} |
| |
| ins_encode %{ |
| __ block_comment("membar_volatile"); |
| __ membar(Assembler::StoreLoad); |
| %} |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // ============================================================================ |
| // Cast/Convert Instructions |
| |
| instruct castX2P(iRegPNoSp dst, iRegL src) %{ |
| match(Set dst (CastX2P src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "mov $dst, $src\t# long -> ptr" %} |
| |
| ins_encode %{ |
| if ($dst$$reg != $src$$reg) { |
| __ mov(as_Register($dst$$reg), as_Register($src$$reg)); |
| } |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct castP2X(iRegLNoSp dst, iRegP src) %{ |
| match(Set dst (CastP2X src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "mov $dst, $src\t# ptr -> long" %} |
| |
| ins_encode %{ |
| if ($dst$$reg != $src$$reg) { |
| __ mov(as_Register($dst$$reg), as_Register($src$$reg)); |
| } |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| // Convert oop into int for vectors alignment masking |
| instruct convP2I(iRegINoSp dst, iRegP src) %{ |
| match(Set dst (ConvL2I (CastP2X src))); |
| |
| ins_cost(INSN_COST); |
| format %{ "movw $dst, $src\t# ptr -> int" %} |
| ins_encode %{ |
| __ movw($dst$$Register, $src$$Register); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
// Convert compressed oop into int for vectors alignment masking
// in case of 32-bit oops (heap < 4GB).
instruct convN2I(iRegINoSp dst, iRegN src)
%{
  predicate(Universe::narrow_oop_shift() == 0);
  match(Set dst (ConvL2I (CastP2X (DecodeN src))));

  ins_cost(INSN_COST);
  format %{ "movw $dst, $src\t# compressed ptr -> int" %}
| ins_encode %{ |
| __ movw($dst$$Register, $src$$Register); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| |
| // Convert oop pointer into compressed form |
| instruct encodeHeapOop(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{ |
| predicate(n->bottom_type()->make_ptr()->ptr() != TypePtr::NotNull); |
| match(Set dst (EncodeP src)); |
| effect(KILL cr); |
| ins_cost(INSN_COST * 3); |
| format %{ "encode_heap_oop $dst, $src" %} |
| ins_encode %{ |
| Register s = $src$$Register; |
| Register d = $dst$$Register; |
| __ encode_heap_oop(d, s); |
| %} |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct encodeHeapOop_not_null(iRegNNoSp dst, iRegP src, rFlagsReg cr) %{ |
| predicate(n->bottom_type()->make_ptr()->ptr() == TypePtr::NotNull); |
| match(Set dst (EncodeP src)); |
| ins_cost(INSN_COST * 3); |
| format %{ "encode_heap_oop_not_null $dst, $src" %} |
| ins_encode %{ |
| __ encode_heap_oop_not_null($dst$$Register, $src$$Register); |
| %} |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct decodeHeapOop(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{ |
| predicate(n->bottom_type()->is_ptr()->ptr() != TypePtr::NotNull && |
| n->bottom_type()->is_ptr()->ptr() != TypePtr::Constant); |
| match(Set dst (DecodeN src)); |
| ins_cost(INSN_COST * 3); |
| format %{ "decode_heap_oop $dst, $src" %} |
| ins_encode %{ |
| Register s = $src$$Register; |
| Register d = $dst$$Register; |
| __ decode_heap_oop(d, s); |
| %} |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct decodeHeapOop_not_null(iRegPNoSp dst, iRegN src, rFlagsReg cr) %{ |
| predicate(n->bottom_type()->is_ptr()->ptr() == TypePtr::NotNull || |
| n->bottom_type()->is_ptr()->ptr() == TypePtr::Constant); |
| match(Set dst (DecodeN src)); |
| ins_cost(INSN_COST * 3); |
| format %{ "decode_heap_oop_not_null $dst, $src" %} |
| ins_encode %{ |
| Register s = $src$$Register; |
| Register d = $dst$$Register; |
| __ decode_heap_oop_not_null(d, s); |
| %} |
| ins_pipe(ialu_reg); |
| %} |
| |
| // n.b. AArch64 implementations of encode_klass_not_null and |
| // decode_klass_not_null do not modify the flags register so, unlike |
| // Intel, we don't kill CR as a side effect here |
| |
| instruct encodeKlass_not_null(iRegNNoSp dst, iRegP src) %{ |
| match(Set dst (EncodePKlass src)); |
| |
| ins_cost(INSN_COST * 3); |
| format %{ "encode_klass_not_null $dst,$src" %} |
| |
| ins_encode %{ |
| Register src_reg = as_Register($src$$reg); |
| Register dst_reg = as_Register($dst$$reg); |
| __ encode_klass_not_null(dst_reg, src_reg); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct decodeKlass_not_null(iRegPNoSp dst, iRegN src) %{ |
| match(Set dst (DecodeNKlass src)); |
| |
| ins_cost(INSN_COST * 3); |
| format %{ "decode_klass_not_null $dst,$src" %} |
| |
| ins_encode %{ |
| Register src_reg = as_Register($src$$reg); |
| Register dst_reg = as_Register($dst$$reg); |
| if (dst_reg != src_reg) { |
| __ decode_klass_not_null(dst_reg, src_reg); |
| } else { |
| __ decode_klass_not_null(dst_reg); |
| } |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct checkCastPP(iRegPNoSp dst) |
| %{ |
| match(Set dst (CheckCastPP dst)); |
| |
| size(0); |
| format %{ "# checkcastPP of $dst" %} |
| ins_encode(/* empty encoding */); |
| ins_pipe(pipe_class_empty); |
| %} |
| |
| instruct castPP(iRegPNoSp dst) |
| %{ |
| match(Set dst (CastPP dst)); |
| |
| size(0); |
| format %{ "# castPP of $dst" %} |
| ins_encode(/* empty encoding */); |
| ins_pipe(pipe_class_empty); |
| %} |
| |
| instruct castII(iRegI dst) |
| %{ |
| match(Set dst (CastII dst)); |
| |
| size(0); |
| format %{ "# castII of $dst" %} |
| ins_encode(/* empty encoding */); |
| ins_cost(0); |
| ins_pipe(pipe_class_empty); |
| %} |
| |
| // ============================================================================ |
| // Atomic operation instructions |
| // |
| // Intel and SPARC both implement Ideal Node LoadPLocked and |
| // Store{PIL}Conditional instructions using a normal load for the |
| // LoadPLocked and a CAS for the Store{PIL}Conditional. |
| // |
// The ideal code appears only to use LoadPLocked/StorePConditional as a
// pair to lock object allocations from Eden space when not using
// TLABs.
| // |
| // There does not appear to be a Load{IL}Locked Ideal Node and the |
| // Ideal code appears to use Store{IL}Conditional as an alias for CAS |
| // and to use StoreIConditional only for 32-bit and StoreLConditional |
| // only for 64-bit. |
| // |
// We implement LoadPLocked and StorePConditional using, respectively,
// the AArch64 load-exclusive and store-conditional instructions, whereas
// each of Store{IL}Conditional must be implemented with a CAS, which
// employs a pair of instructions comprising a load-exclusive followed
// by a store-conditional.
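//
// A heap-top bump therefore expands to something like the following
// (a sketch only; register choices and the retry branch, which comes
// from the ideal graph, are illustrative):
//
//   retry:
//     ldaxr x0, [top]        // LoadPLocked: linked load of heap top
//     add   x1, x0, #size    // compute the new top
//     stlxr w8, x1, [top]    // StorePConditional: try to publish it
//     cbnz  w8, retry        // non-zero status means we lost the race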
| |
| |
| // Locked-load (linked load) of the current heap-top |
| // used when updating the eden heap top |
| // implemented using ldaxr on AArch64 |
| |
| instruct loadPLocked(iRegPNoSp dst, indirect mem) |
| %{ |
| match(Set dst (LoadPLocked mem)); |
| |
| ins_cost(VOLATILE_REF_COST); |
| |
| format %{ "ldaxr $dst, $mem\t# ptr linked acquire" %} |
| |
| ins_encode(aarch64_enc_ldaxr(dst, mem)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Conditional-store of the updated heap-top. |
| // Used during allocation of the shared heap. |
| // Sets flag (EQ) on success. |
| // implemented using stlxr on AArch64. |
| |
| instruct storePConditional(memory heap_top_ptr, iRegP oldval, iRegP newval, rFlagsReg cr) |
| %{ |
| match(Set cr (StorePConditional heap_top_ptr (Binary oldval newval))); |
| |
| ins_cost(VOLATILE_REF_COST); |
| |
| // TODO |
| // do we need to do a store-conditional release or can we just use a |
| // plain store-conditional? |
| |
| format %{ |
| "stlxr rscratch1, $newval, $heap_top_ptr\t# ptr cond release" |
| "cmpw rscratch1, zr\t# EQ on successful write" |
| %} |
| |
| ins_encode(aarch64_enc_stlxr(newval, heap_top_ptr)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| |
| // storeLConditional is used by PhaseMacroExpand::expand_lock_node |
| // when attempting to rebias a lock towards the current thread. We |
| // must use the acquire form of cmpxchg in order to guarantee acquire |
| // semantics in this case. |
| instruct storeLConditional(indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) |
| %{ |
| match(Set cr (StoreLConditional mem (Binary oldval newval))); |
| |
| ins_cost(VOLATILE_REF_COST); |
| |
| format %{ |
| "cmpxchg rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval" |
| "cmpw rscratch1, zr\t# EQ on successful write" |
| %} |
| |
| ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval)); |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| // storeIConditional also has acquire semantics, for no better reason |
| // than matching storeLConditional. At the time of writing this |
| // comment storeIConditional was not used anywhere by AArch64. |
| instruct storeIConditional(indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) |
| %{ |
| match(Set cr (StoreIConditional mem (Binary oldval newval))); |
| |
| ins_cost(VOLATILE_REF_COST); |
| |
| format %{ |
| "cmpxchgw rscratch1, $mem, $oldval, $newval, $mem\t# if $mem == $oldval then $mem <-- $newval" |
| "cmpw rscratch1, zr\t# EQ on successful write" |
| %} |
| |
| ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval)); |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| // standard CompareAndSwapX when we are using barriers |
| // these have higher priority than the rules selected by a predicate |
| |
| // XXX No flag versions for CompareAndSwap{I,L,P,N} because matcher |
| // can't match them |
| |
| instruct compareAndSwapB(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{ |
| |
| match(Set res (CompareAndSwapB mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| |
| effect(KILL cr); |
| |
| format %{ |
| "cmpxchgb $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval" |
| "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| |
| ins_encode(aarch64_enc_cmpxchgb(mem, oldval, newval), |
| aarch64_enc_cset_eq(res)); |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndSwapS(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{ |
| |
| match(Set res (CompareAndSwapS mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| |
| effect(KILL cr); |
| |
| format %{ |
| "cmpxchgs $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval" |
| "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| |
| ins_encode(aarch64_enc_cmpxchgs(mem, oldval, newval), |
| aarch64_enc_cset_eq(res)); |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndSwapI(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{ |
| |
| match(Set res (CompareAndSwapI mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| |
| effect(KILL cr); |
| |
| format %{ |
| "cmpxchgw $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval" |
| "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| |
| ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval), |
| aarch64_enc_cset_eq(res)); |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndSwapL(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{ |
| |
| match(Set res (CompareAndSwapL mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| |
| effect(KILL cr); |
| |
| format %{ |
| "cmpxchg $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval" |
| "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| |
| ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval), |
| aarch64_enc_cset_eq(res)); |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ |
| |
| match(Set res (CompareAndSwapP mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| |
| effect(KILL cr); |
| |
| format %{ |
| "cmpxchg $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval" |
| "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| |
| ins_encode(aarch64_enc_cmpxchg(mem, oldval, newval), |
| aarch64_enc_cset_eq(res)); |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndSwapN(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{ |
| |
| match(Set res (CompareAndSwapN mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| |
| effect(KILL cr); |
| |
| format %{ |
| "cmpxchgw $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval" |
| "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| |
| ins_encode(aarch64_enc_cmpxchgw(mem, oldval, newval), |
| aarch64_enc_cset_eq(res)); |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| // alternative CompareAndSwapX when we are eliding barriers |
| |
| instruct compareAndSwapIAcq(iRegINoSp res, indirect mem, iRegINoSp oldval, iRegINoSp newval, rFlagsReg cr) %{ |
| |
| predicate(needs_acquiring_load_exclusive(n)); |
| match(Set res (CompareAndSwapI mem (Binary oldval newval))); |
| ins_cost(VOLATILE_REF_COST); |
| |
| effect(KILL cr); |
| |
| format %{ |
| "cmpxchgw_acq $mem, $oldval, $newval\t# (int) if $mem == $oldval then $mem <-- $newval" |
| "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| |
| ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval), |
| aarch64_enc_cset_eq(res)); |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndSwapLAcq(iRegINoSp res, indirect mem, iRegLNoSp oldval, iRegLNoSp newval, rFlagsReg cr) %{ |
| |
| predicate(needs_acquiring_load_exclusive(n)); |
| match(Set res (CompareAndSwapL mem (Binary oldval newval))); |
| ins_cost(VOLATILE_REF_COST); |
| |
| effect(KILL cr); |
| |
| format %{ |
| "cmpxchg_acq $mem, $oldval, $newval\t# (long) if $mem == $oldval then $mem <-- $newval" |
| "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| |
| ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval), |
| aarch64_enc_cset_eq(res)); |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndSwapPAcq(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ |
| |
| predicate(needs_acquiring_load_exclusive(n)); |
| match(Set res (CompareAndSwapP mem (Binary oldval newval))); |
| ins_cost(VOLATILE_REF_COST); |
| |
| effect(KILL cr); |
| |
| format %{ |
| "cmpxchg_acq $mem, $oldval, $newval\t# (ptr) if $mem == $oldval then $mem <-- $newval" |
| "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| |
| ins_encode(aarch64_enc_cmpxchg_acq(mem, oldval, newval), |
| aarch64_enc_cset_eq(res)); |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndSwapNAcq(iRegINoSp res, indirect mem, iRegNNoSp oldval, iRegNNoSp newval, rFlagsReg cr) %{ |
| |
| predicate(needs_acquiring_load_exclusive(n)); |
| match(Set res (CompareAndSwapN mem (Binary oldval newval))); |
| ins_cost(VOLATILE_REF_COST); |
| |
| effect(KILL cr); |
| |
| format %{ |
| "cmpxchgw_acq $mem, $oldval, $newval\t# (narrow oop) if $mem == $oldval then $mem <-- $newval" |
| "cset $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| |
| ins_encode(aarch64_enc_cmpxchgw_acq(mem, oldval, newval), |
| aarch64_enc_cset_eq(res)); |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| |
| // --------------------------------------------------------------------- |
| |
| |
| // BEGIN This section of the file is automatically generated. Do not edit -------------- |
| |
| // Sundry CAS operations. Note that release is always true, |
| // regardless of the memory ordering of the CAS. This is because we |
| // need the volatile case to be sequentially consistent but there is |
| // no trailing StoreLoad barrier emitted by C2. Unfortunately we |
| // can't check the type of memory ordering here, so we always emit a |
| // STLXR. |
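
// The cmpxchg calls below are MacroAssembler helpers; broadly speaking
// they expand either to a single LSE CAS-family instruction or, on CPUs
// without LSE atomics, to a load-exclusive/store-exclusive retry loop.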
| |
| // This section is generated from aarch64_ad_cas.m4 |
| |
| |
| |
| instruct compareAndExchangeB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{ |
| match(Set res (CompareAndExchangeB mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| effect(TEMP_DEF res, KILL cr); |
| format %{ |
| "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval" |
| %} |
| ins_encode %{ |
| __ uxtbw(rscratch2, $oldval$$Register); |
| __ cmpxchg($mem$$Register, rscratch2, $newval$$Register, |
| Assembler::byte, /*acquire*/ false, /*release*/ true, |
| /*weak*/ false, $res$$Register); |
| __ sxtbw($res$$Register, $res$$Register); |
| %} |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndExchangeS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{ |
| match(Set res (CompareAndExchangeS mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| effect(TEMP_DEF res, KILL cr); |
| format %{ |
| "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval" |
| %} |
| ins_encode %{ |
| __ uxthw(rscratch2, $oldval$$Register); |
| __ cmpxchg($mem$$Register, rscratch2, $newval$$Register, |
| Assembler::halfword, /*acquire*/ false, /*release*/ true, |
| /*weak*/ false, $res$$Register); |
| __ sxthw($res$$Register, $res$$Register); |
| %} |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndExchangeI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{ |
| match(Set res (CompareAndExchangeI mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| effect(TEMP_DEF res, KILL cr); |
| format %{ |
| "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval" |
| %} |
| ins_encode %{ |
| __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::word, /*acquire*/ false, /*release*/ true, |
| /*weak*/ false, $res$$Register); |
| %} |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndExchangeL(iRegLNoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{ |
| match(Set res (CompareAndExchangeL mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| effect(TEMP_DEF res, KILL cr); |
| format %{ |
| "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval" |
| %} |
| ins_encode %{ |
| __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::xword, /*acquire*/ false, /*release*/ true, |
| /*weak*/ false, $res$$Register); |
| %} |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndExchangeN(iRegNNoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{ |
| match(Set res (CompareAndExchangeN mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| effect(TEMP_DEF res, KILL cr); |
| format %{ |
| "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval" |
| %} |
| ins_encode %{ |
| __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::word, /*acquire*/ false, /*release*/ true, |
| /*weak*/ false, $res$$Register); |
| %} |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct compareAndExchangeP(iRegPNoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ |
| match(Set res (CompareAndExchangeP mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| effect(TEMP_DEF res, KILL cr); |
| format %{ |
| "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval" |
| %} |
| ins_encode %{ |
| __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::xword, /*acquire*/ false, /*release*/ true, |
| /*weak*/ false, $res$$Register); |
| %} |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct weakCompareAndSwapB(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{ |
| match(Set res (WeakCompareAndSwapB mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| effect(KILL cr); |
| format %{ |
| "cmpxchg $res = $mem, $oldval, $newval\t# (byte, weak) if $mem == $oldval then $mem <-- $newval" |
| "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| ins_encode %{ |
| __ uxtbw(rscratch2, $oldval$$Register); |
| __ cmpxchg($mem$$Register, rscratch2, $newval$$Register, |
| Assembler::byte, /*acquire*/ false, /*release*/ true, |
| /*weak*/ true, noreg); |
| __ csetw($res$$Register, Assembler::EQ); |
| %} |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct weakCompareAndSwapS(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{ |
| match(Set res (WeakCompareAndSwapS mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| effect(KILL cr); |
| format %{ |
| "cmpxchg $res = $mem, $oldval, $newval\t# (short, weak) if $mem == $oldval then $mem <-- $newval" |
| "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| ins_encode %{ |
| __ uxthw(rscratch2, $oldval$$Register); |
| __ cmpxchg($mem$$Register, rscratch2, $newval$$Register, |
| Assembler::halfword, /*acquire*/ false, /*release*/ true, |
| /*weak*/ true, noreg); |
| __ csetw($res$$Register, Assembler::EQ); |
| %} |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct weakCompareAndSwapI(iRegINoSp res, indirect mem, iRegI oldval, iRegI newval, rFlagsReg cr) %{ |
| match(Set res (WeakCompareAndSwapI mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| effect(KILL cr); |
| format %{ |
| "cmpxchg $res = $mem, $oldval, $newval\t# (int, weak) if $mem == $oldval then $mem <-- $newval" |
| "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| ins_encode %{ |
| __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::word, /*acquire*/ false, /*release*/ true, |
| /*weak*/ true, noreg); |
| __ csetw($res$$Register, Assembler::EQ); |
| %} |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct weakCompareAndSwapL(iRegINoSp res, indirect mem, iRegL oldval, iRegL newval, rFlagsReg cr) %{ |
| match(Set res (WeakCompareAndSwapL mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| effect(KILL cr); |
| format %{ |
| "cmpxchg $res = $mem, $oldval, $newval\t# (long, weak) if $mem == $oldval then $mem <-- $newval" |
| "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| ins_encode %{ |
| __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::xword, /*acquire*/ false, /*release*/ true, |
| /*weak*/ true, noreg); |
| __ csetw($res$$Register, Assembler::EQ); |
| %} |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct weakCompareAndSwapN(iRegINoSp res, indirect mem, iRegN oldval, iRegN newval, rFlagsReg cr) %{ |
| match(Set res (WeakCompareAndSwapN mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| effect(KILL cr); |
| format %{ |
| "cmpxchg $res = $mem, $oldval, $newval\t# (narrow oop, weak) if $mem == $oldval then $mem <-- $newval" |
| "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| ins_encode %{ |
| __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::word, /*acquire*/ false, /*release*/ true, |
| /*weak*/ true, noreg); |
| __ csetw($res$$Register, Assembler::EQ); |
| %} |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct weakCompareAndSwapP(iRegINoSp res, indirect mem, iRegP oldval, iRegP newval, rFlagsReg cr) %{ |
| match(Set res (WeakCompareAndSwapP mem (Binary oldval newval))); |
| ins_cost(2 * VOLATILE_REF_COST); |
| effect(KILL cr); |
| format %{ |
| "cmpxchg $res = $mem, $oldval, $newval\t# (ptr, weak) if $mem == $oldval then $mem <-- $newval" |
| "csetw $res, EQ\t# $res <-- (EQ ? 1 : 0)" |
| %} |
| ins_encode %{ |
| __ cmpxchg($mem$$Register, $oldval$$Register, $newval$$Register, |
| Assembler::xword, /*acquire*/ false, /*release*/ true, |
| /*weak*/ true, noreg); |
| __ csetw($res$$Register, Assembler::EQ); |
| %} |
| ins_pipe(pipe_slow); |
| %} |
| |
| // END This section of the file is automatically generated. Do not edit -------------- |
| // --------------------------------------------------------------------- |
| |
| instruct get_and_setI(indirect mem, iRegI newv, iRegINoSp prev) %{ |
| match(Set prev (GetAndSetI mem newv)); |
| format %{ "atomic_xchgw $prev, $newv, [$mem]" %} |
| ins_encode %{ |
| __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base)); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct get_and_setL(indirect mem, iRegL newv, iRegLNoSp prev) %{ |
| match(Set prev (GetAndSetL mem newv)); |
| format %{ "atomic_xchg $prev, $newv, [$mem]" %} |
| ins_encode %{ |
| __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base)); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct get_and_setN(indirect mem, iRegN newv, iRegINoSp prev) %{ |
| match(Set prev (GetAndSetN mem newv)); |
| format %{ "atomic_xchgw $prev, $newv, [$mem]" %} |
| ins_encode %{ |
| __ atomic_xchgw($prev$$Register, $newv$$Register, as_Register($mem$$base)); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct get_and_setP(indirect mem, iRegP newv, iRegPNoSp prev) %{ |
| match(Set prev (GetAndSetP mem newv)); |
| format %{ "atomic_xchg $prev, $newv, [$mem]" %} |
| ins_encode %{ |
| __ atomic_xchg($prev$$Register, $newv$$Register, as_Register($mem$$base)); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| |
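| // The get_and_add rules map C2's GetAndAdd nodes (as produced for |
| // Unsafe.getAndAddLong and friends) onto MacroAssembler::atomic_add / |
| // atomic_addw. A sketch of the non-LSE retry loop those helpers are |
| // assumed to emit (illustrative register names): |
| // |
| //   retry: |
| //     ldaxr  prev, [addr] |
| //     add    tmp, prev, incr |
| //     stlxr  status, tmp, [addr] |
| //     cbnz   status, retry |
| // |
| // The _no_res variants below apply when the previous value is unused |
| // (result_not_used) and are costed slightly cheaper so that the |
| // matcher prefers them. |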
| instruct get_and_addL(indirect mem, iRegLNoSp newval, iRegL incr) %{ |
| match(Set newval (GetAndAddL mem incr)); |
| ins_cost(INSN_COST * 10); |
| format %{ "get_and_addL $newval, [$mem], $incr" %} |
| ins_encode %{ |
| __ atomic_add($newval$$Register, $incr$$Register, as_Register($mem$$base)); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct get_and_addL_no_res(indirect mem, Universe dummy, iRegL incr) %{ |
| predicate(n->as_LoadStore()->result_not_used()); |
| match(Set dummy (GetAndAddL mem incr)); |
| ins_cost(INSN_COST * 9); |
| format %{ "get_and_addL [$mem], $incr" %} |
| ins_encode %{ |
| __ atomic_add(noreg, $incr$$Register, as_Register($mem$$base)); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct get_and_addLi(indirect mem, iRegLNoSp newval, immLAddSub incr) %{ |
| match(Set newval (GetAndAddL mem incr)); |
| ins_cost(INSN_COST * 10); |
| format %{ "get_and_addL $newval, [$mem], $incr" %} |
| ins_encode %{ |
| __ atomic_add($newval$$Register, $incr$$constant, as_Register($mem$$base)); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct get_and_addLi_no_res(indirect mem, Universe dummy, immLAddSub incr) %{ |
| predicate(n->as_LoadStore()->result_not_used()); |
| match(Set dummy (GetAndAddL mem incr)); |
| ins_cost(INSN_COST * 9); |
| format %{ "get_and_addL [$mem], $incr" %} |
| ins_encode %{ |
| __ atomic_add(noreg, $incr$$constant, as_Register($mem$$base)); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct get_and_addI(indirect mem, iRegINoSp newval, iRegIorL2I incr) %{ |
| match(Set newval (GetAndAddI mem incr)); |
| ins_cost(INSN_COST * 10); |
| format %{ "get_and_addI $newval, [$mem], $incr" %} |
| ins_encode %{ |
| __ atomic_addw($newval$$Register, $incr$$Register, as_Register($mem$$base)); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct get_and_addI_no_res(indirect mem, Universe dummy, iRegIorL2I incr) %{ |
| predicate(n->as_LoadStore()->result_not_used()); |
| match(Set dummy (GetAndAddI mem incr)); |
| ins_cost(INSN_COST * 9); |
| format %{ "get_and_addI [$mem], $incr" %} |
| ins_encode %{ |
| __ atomic_addw(noreg, $incr$$Register, as_Register($mem$$base)); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct get_and_addIi(indirect mem, iRegINoSp newval, immIAddSub incr) %{ |
| match(Set newval (GetAndAddI mem incr)); |
| ins_cost(INSN_COST * 10); |
| format %{ "get_and_addI $newval, [$mem], $incr" %} |
| ins_encode %{ |
| __ atomic_addw($newval$$Register, $incr$$constant, as_Register($mem$$base)); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct get_and_addIi_no_res(indirect mem, Universe dummy, immIAddSub incr) %{ |
| predicate(n->as_LoadStore()->result_not_used()); |
| match(Set dummy (GetAndAddI mem incr)); |
| ins_cost(INSN_COST * 9); |
| format %{ "get_and_addI [$mem], $incr" %} |
| ins_encode %{ |
| __ atomic_addw(noreg, $incr$$constant, as_Register($mem$$base)); |
| %} |
| ins_pipe(pipe_serial); |
| %} |
| |
| // Manifest a CmpL result in an integer register. |
| // (src1 < src2) ? -1 : ((src1 > src2) ? 1 : 0) |
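| // e.g. src1 = 5, src2 = 9: the cmp sets LT, csetw (ne) produces 1 and |
| // cnegw (lt) negates it to -1; for equal inputs csetw produces 0 and |
| // the cnegw leaves it unchanged. |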
| instruct cmpL3_reg_reg(iRegINoSp dst, iRegL src1, iRegL src2, rFlagsReg flags) |
| %{ |
| match(Set dst (CmpL3 src1 src2)); |
| effect(KILL flags); |
| |
| ins_cost(INSN_COST * 6); |
| format %{ |
| "cmp $src1, $src2" |
| "csetw $dst, ne" |
| "cnegw $dst, lt" |
| %} |
| ins_encode %{ |
| __ cmp($src1$$Register, $src2$$Register); |
| __ csetw($dst$$Register, Assembler::NE); |
| __ cnegw($dst$$Register, $dst$$Register, Assembler::LT); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| instruct cmpL3_reg_imm(iRegINoSp dst, iRegL src1, immLAddSub src2, rFlagsReg flags) |
| %{ |
| match(Set dst (CmpL3 src1 src2)); |
| effect(KILL flags); |
| |
| ins_cost(INSN_COST * 6); |
| format %{ |
| "cmp $src1, $src2" |
| "csetw $dst, ne" |
| "cnegw $dst, lt" |
| %} |
| ins_encode %{ |
| int32_t con = (int32_t)$src2$$constant; |
| if (con < 0) { |
| __ adds(zr, $src1$$Register, -con); |
| } else { |
| __ subs(zr, $src1$$Register, con); |
| } |
| __ csetw($dst$$Register, Assembler::NE); |
| __ cnegw($dst$$Register, $dst$$Register, Assembler::LT); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| // ============================================================================ |
| // Conditional Move Instructions |
| |
| // n.b. we have identical rules for both a signed compare op (cmpOp) |
| // and an unsigned compare op (cmpOpU). it would be nice if we could |
| // define an op class which merged both inputs and use it to type the |
| // argument to a single rule. unfortunately this fails because the |
| // opclass does not live up to the COND_INTER interface of its |
| // component operands. When the generic code tries to negate the |
| // operand it ends up running the generic MachOper::negate method |
| // which throws a ShouldNotHappen. So, we have to provide two flavours |
| // of each rule, one for a cmpOp and a second for a cmpOpU (sigh). |
| |
| instruct cmovI_reg_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, int" %} |
| |
| ins_encode %{ |
| __ cselw(as_Register($dst$$reg), |
| as_Register($src2$$reg), |
| as_Register($src1$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg_reg); |
| %} |
| |
| instruct cmovUI_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (CMoveI (Binary cmp cr) (Binary src1 src2))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cselw $dst, $src2, $src1 $cmp\t# unsigned, int" %} |
| |
| ins_encode %{ |
| __ cselw(as_Register($dst$$reg), |
| as_Register($src2$$reg), |
| as_Register($src1$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg_reg); |
| %} |
| |
| // special cases where one arg is zero |
| |
| // n.b. this is selected in preference to the rule above because it |
| // avoids loading constant 0 into a source register |
| |
| // TODO |
| // we ought only to be able to cull one of these variants as the ideal |
| // transforms ought always to order the zero consistently (to left/right?) |
| |
| instruct cmovI_zero_reg(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{ |
| match(Set dst (CMoveI (Binary cmp cr) (Binary zero src))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cselw $dst, $src, zr $cmp\t# signed, int" %} |
| |
| ins_encode %{ |
| __ cselw(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| zr, |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovUI_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, iRegIorL2I src) %{ |
| match(Set dst (CMoveI (Binary cmp cr) (Binary zero src))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cselw $dst, $src, zr $cmp\t# unsigned, int" %} |
| |
| ins_encode %{ |
| __ cselw(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| zr, |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovI_reg_zero(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{ |
| match(Set dst (CMoveI (Binary cmp cr) (Binary src zero))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cselw $dst, zr, $src $cmp\t# signed, int" %} |
| |
| ins_encode %{ |
| __ cselw(as_Register($dst$$reg), |
| zr, |
| as_Register($src$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovUI_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, iRegIorL2I src, immI0 zero) %{ |
| match(Set dst (CMoveI (Binary cmp cr) (Binary src zero))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cselw $dst, zr, $src $cmp\t# unsigned, int" %} |
| |
| ins_encode %{ |
| __ cselw(as_Register($dst$$reg), |
| zr, |
| as_Register($src$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| // special case for creating a boolean 0 or 1 |
| |
| // n.b. this is selected in preference to the rule above because it |
| // avoids loading constants 0 and 1 into a source register |
| |
| instruct cmovI_reg_zero_one(cmpOp cmp, rFlagsReg cr, iRegINoSp dst, immI0 zero, immI_1 one) %{ |
| match(Set dst (CMoveI (Binary cmp cr) (Binary one zero))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csincw $dst, zr, zr $cmp\t# signed, int" %} |
| |
| ins_encode %{ |
| // equivalently |
| // cset(as_Register($dst$$reg), |
| // negate_condition((Assembler::Condition)$cmp$$cmpcode)); |
| __ csincw(as_Register($dst$$reg), |
| zr, |
| zr, |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_none); |
| %} |
| |
| instruct cmovUI_reg_zero_one(cmpOpU cmp, rFlagsRegU cr, iRegINoSp dst, immI0 zero, immI_1 one) %{ |
| match(Set dst (CMoveI (Binary cmp cr) (Binary one zero))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csincw $dst, zr, zr $cmp\t# unsigned, int" %} |
| |
| ins_encode %{ |
| // equivalently |
| // cset(as_Register($dst$$reg), |
| // negate_condition((Assembler::Condition)$cmp$$cmpcode)); |
| __ csincw(as_Register($dst$$reg), |
| zr, |
| zr, |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_none); |
| %} |
| |
| instruct cmovL_reg_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{ |
| match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csel $dst, $src2, $src1 $cmp\t# signed, long" %} |
| |
| ins_encode %{ |
| __ csel(as_Register($dst$$reg), |
| as_Register($src2$$reg), |
| as_Register($src1$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg_reg); |
| %} |
| |
| instruct cmovUL_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src1, iRegL src2) %{ |
| match(Set dst (CMoveL (Binary cmp cr) (Binary src1 src2))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, long" %} |
| |
| ins_encode %{ |
| __ csel(as_Register($dst$$reg), |
| as_Register($src2$$reg), |
| as_Register($src1$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg_reg); |
| %} |
| |
| // special cases where one arg is zero |
| |
| instruct cmovL_reg_zero(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, iRegL src, immL0 zero) %{ |
| match(Set dst (CMoveL (Binary cmp cr) (Binary src zero))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csel $dst, zr, $src $cmp\t# signed, long" %} |
| |
| ins_encode %{ |
| __ csel(as_Register($dst$$reg), |
| zr, |
| as_Register($src$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovUL_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, iRegL src, immL0 zero) %{ |
| match(Set dst (CMoveL (Binary cmp cr) (Binary src zero))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csel $dst, zr, $src $cmp\t# unsigned, long" %} |
| |
| ins_encode %{ |
| __ csel(as_Register($dst$$reg), |
| zr, |
| as_Register($src$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovL_zero_reg(cmpOp cmp, rFlagsReg cr, iRegLNoSp dst, immL0 zero, iRegL src) %{ |
| match(Set dst (CMoveL (Binary cmp cr) (Binary zero src))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csel $dst, $src, zr $cmp\t# signed, long" %} |
| |
| ins_encode %{ |
| __ csel(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| zr, |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovUL_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegLNoSp dst, immL0 zero, iRegL src) %{ |
| match(Set dst (CMoveL (Binary cmp cr) (Binary zero src))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csel $dst, $src, zr $cmp\t# unsigned, long" %} |
| |
| ins_encode %{ |
| __ csel(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| zr, |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovP_reg_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{ |
| match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csel $dst, $src2, $src1 $cmp\t# signed, ptr" %} |
| |
| ins_encode %{ |
| __ csel(as_Register($dst$$reg), |
| as_Register($src2$$reg), |
| as_Register($src1$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg_reg); |
| %} |
| |
| instruct cmovUP_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src1, iRegP src2) %{ |
| match(Set dst (CMoveP (Binary cmp cr) (Binary src1 src2))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csel $dst, $src2, $src1 $cmp\t# unsigned, ptr" %} |
| |
| ins_encode %{ |
| __ csel(as_Register($dst$$reg), |
| as_Register($src2$$reg), |
| as_Register($src1$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg_reg); |
| %} |
| |
| // special cases where one arg is zero |
| |
| instruct cmovP_reg_zero(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, iRegP src, immP0 zero) %{ |
| match(Set dst (CMoveP (Binary cmp cr) (Binary src zero))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csel $dst, zr, $src $cmp\t# signed, ptr" %} |
| |
| ins_encode %{ |
| __ csel(as_Register($dst$$reg), |
| zr, |
| as_Register($src$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovUP_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, iRegP src, immP0 zero) %{ |
| match(Set dst (CMoveP (Binary cmp cr) (Binary src zero))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csel $dst, zr, $src $cmp\t# unsigned, ptr" %} |
| |
| ins_encode %{ |
| __ csel(as_Register($dst$$reg), |
| zr, |
| as_Register($src$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovP_zero_reg(cmpOp cmp, rFlagsReg cr, iRegPNoSp dst, immP0 zero, iRegP src) %{ |
| match(Set dst (CMoveP (Binary cmp cr) (Binary zero src))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csel $dst, $src, zr $cmp\t# signed, ptr" %} |
| |
| ins_encode %{ |
| __ csel(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| zr, |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovUP_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegPNoSp dst, immP0 zero, iRegP src) %{ |
| match(Set dst (CMoveP (Binary cmp cr) (Binary zero src))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "csel $dst, $src, zr $cmp\t# unsigned, ptr" %} |
| |
| ins_encode %{ |
| __ csel(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| zr, |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovN_reg_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{ |
| match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr" %} |
| |
| ins_encode %{ |
| __ cselw(as_Register($dst$$reg), |
| as_Register($src2$$reg), |
| as_Register($src1$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg_reg); |
| %} |
| |
| instruct cmovUN_reg_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src1, iRegN src2) %{ |
| match(Set dst (CMoveN (Binary cmp cr) (Binary src1 src2))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cselw $dst, $src2, $src1 $cmp\t# signed, compressed ptr" %} |
| |
| ins_encode %{ |
| __ cselw(as_Register($dst$$reg), |
| as_Register($src2$$reg), |
| as_Register($src1$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg_reg); |
| %} |
| |
| // special cases where one arg is zero |
| |
| instruct cmovN_reg_zero(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, iRegN src, immN0 zero) %{ |
| match(Set dst (CMoveN (Binary cmp cr) (Binary src zero))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cselw $dst, zr, $src $cmp\t# signed, compressed ptr" %} |
| |
| ins_encode %{ |
| __ cselw(as_Register($dst$$reg), |
| zr, |
| as_Register($src$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovUN_reg_zero(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, iRegN src, immN0 zero) %{ |
| match(Set dst (CMoveN (Binary cmp cr) (Binary src zero))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cselw $dst, zr, $src $cmp\t# unsigned, compressed ptr" %} |
| |
| ins_encode %{ |
| __ cselw(as_Register($dst$$reg), |
| zr, |
| as_Register($src$$reg), |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovN_zero_reg(cmpOp cmp, rFlagsReg cr, iRegNNoSp dst, immN0 zero, iRegN src) %{ |
| match(Set dst (CMoveN (Binary cmp cr) (Binary zero src))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cselw $dst, $src, zr $cmp\t# signed, compressed ptr" %} |
| |
| ins_encode %{ |
| __ cselw(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| zr, |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovUN_zero_reg(cmpOpU cmp, rFlagsRegU cr, iRegNNoSp dst, immN0 zero, iRegN src) %{ |
| match(Set dst (CMoveN (Binary cmp cr) (Binary zero src))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cselw $dst, $src, zr $cmp\t# unsigned, compressed ptr" %} |
| |
| ins_encode %{ |
| __ cselw(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| zr, |
| (Assembler::Condition)$cmp$$cmpcode); |
| %} |
| |
| ins_pipe(icond_reg); |
| %} |
| |
| instruct cmovF_reg(cmpOp cmp, rFlagsReg cr, vRegF dst, vRegF src1, vRegF src2) |
| %{ |
| match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2))); |
| |
| ins_cost(INSN_COST * 3); |
| |
| format %{ "fcsels $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %} |
| ins_encode %{ |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| __ fcsels(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src2$$reg), |
| as_FloatRegister($src1$$reg), |
| cond); |
| %} |
| |
| ins_pipe(fp_cond_reg_reg_s); |
| %} |
| |
| instruct cmovUF_reg(cmpOpU cmp, rFlagsRegU cr, vRegF dst, vRegF src1, vRegF src2) |
| %{ |
| match(Set dst (CMoveF (Binary cmp cr) (Binary src1 src2))); |
| |
| ins_cost(INSN_COST * 3); |
| |
| format %{ "fcsels $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %} |
| ins_encode %{ |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| __ fcsels(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src2$$reg), |
| as_FloatRegister($src1$$reg), |
| cond); |
| %} |
| |
| ins_pipe(fp_cond_reg_reg_s); |
| %} |
| |
| instruct cmovD_reg(cmpOp cmp, rFlagsReg cr, vRegD dst, vRegD src1, vRegD src2) |
| %{ |
| match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2))); |
| |
| ins_cost(INSN_COST * 3); |
| |
| format %{ "fcseld $dst, $src1, $src2, $cmp\t# signed cmove float\n\t" %} |
| ins_encode %{ |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| __ fcseld(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src2$$reg), |
| as_FloatRegister($src1$$reg), |
| cond); |
| %} |
| |
| ins_pipe(fp_cond_reg_reg_d); |
| %} |
| |
| instruct cmovUD_reg(cmpOpU cmp, rFlagsRegU cr, vRegD dst, vRegD src1, vRegD src2) |
| %{ |
| match(Set dst (CMoveD (Binary cmp cr) (Binary src1 src2))); |
| |
| ins_cost(INSN_COST * 3); |
| |
| format %{ "fcseld $dst, $src1, $src2, $cmp\t# unsigned cmove float\n\t" %} |
| ins_encode %{ |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| __ fcseld(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src2$$reg), |
| as_FloatRegister($src1$$reg), |
| cond); |
| %} |
| |
| ins_pipe(fp_cond_reg_reg_d); |
| %} |
| |
| // ============================================================================ |
| // Arithmetic Instructions |
| // |
| |
| // Integer Addition |
| |
| // TODO |
| // these currently employ operations which do not set CR and hence are |
| // not flagged as killing CR but we would like to isolate the cases |
| // where we want to set flags from those where we don't. need to work |
| // out how to do that. |
| |
| instruct addI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (AddI src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "addw $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ addw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct addI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{ |
| match(Set dst (AddI src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "addw $dst, $src1, $src2" %} |
| |
| // use opcode to indicate that this is an add not a sub |
| opcode(0x0); |
| |
| ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2)); |
| |
| ins_pipe(ialu_reg_imm); |
| %} |
| |
| instruct addI_reg_imm_i2l(iRegINoSp dst, iRegL src1, immIAddSub src2) %{ |
| match(Set dst (AddI (ConvL2I src1) src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "addw $dst, $src1, $src2" %} |
| |
| // use opcode to indicate that this is an add not a sub |
| opcode(0x0); |
| |
| ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2)); |
| |
| ins_pipe(ialu_reg_imm); |
| %} |
| |
| // Pointer Addition |
| instruct addP_reg_reg(iRegPNoSp dst, iRegP src1, iRegL src2) %{ |
| match(Set dst (AddP src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2\t# ptr" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct addP_reg_reg_ext(iRegPNoSp dst, iRegP src1, iRegIorL2I src2) %{ |
| match(Set dst (AddP src1 (ConvI2L src2))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "add $dst, $src1, $src2, sxtw\t# ptr" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtw); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct addP_reg_reg_lsl(iRegPNoSp dst, iRegP src1, iRegL src2, immIScale scale) %{ |
| match(Set dst (AddP src1 (LShiftL src2 scale))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "add $dst, $src1, $src2, LShiftL $scale\t# ptr" %} |
| |
| ins_encode %{ |
| __ lea(as_Register($dst$$reg), |
| Address(as_Register($src1$$reg), as_Register($src2$$reg), |
| Address::lsl($scale$$constant))); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct addP_reg_reg_ext_shift(iRegPNoSp dst, iRegP src1, iRegIorL2I src2, immIScale scale) %{ |
| match(Set dst (AddP src1 (LShiftL (ConvI2L src2) scale))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "add $dst, $src1, $src2, I2L $scale\t# ptr" %} |
| |
| ins_encode %{ |
| __ lea(as_Register($dst$$reg), |
| Address(as_Register($src1$$reg), as_Register($src2$$reg), |
| Address::sxtw($scale$$constant))); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct lshift_ext(iRegLNoSp dst, iRegIorL2I src, immI scale, rFlagsReg cr) %{ |
| match(Set dst (LShiftL (ConvI2L src) scale)); |
| |
| ins_cost(INSN_COST); |
| format %{ "sbfiz $dst, $src, $scale & 63, -$scale & 63\t" %} |
| |
| ins_encode %{ |
| __ sbfiz(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| $scale$$constant & 63, MIN(32, (-$scale$$constant) & 63)); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // Pointer Immediate Addition |
| // n.b. this needs to be more expensive than using an indirect memory |
| // operand |
| instruct addP_reg_imm(iRegPNoSp dst, iRegP src1, immLAddSub src2) %{ |
| match(Set dst (AddP src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2\t# ptr" %} |
| |
| // use opcode to indicate that this is an add not a sub |
| opcode(0x0); |
| |
| ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) ); |
| |
| ins_pipe(ialu_reg_imm); |
| %} |
| |
| // Long Addition |
| instruct addL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{ |
| |
| match(Set dst (AddL src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| // Long Immediate Addition. No constant pool entries required. |
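| // (immLAddSub only accepts immediates that an add/sub instruction can |
| // encode directly: an unsigned 12-bit value, optionally shifted left |
| // by 12, or the negation of one -- e.g. 16 or 16 << 12.) |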
| instruct addL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{ |
| match(Set dst (AddL src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2" %} |
| |
| // use opcode to indicate that this is an add not a sub |
| opcode(0x0); |
| |
| ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) ); |
| |
| ins_pipe(ialu_reg_imm); |
| %} |
| |
| // Integer Subtraction |
| instruct subI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (SubI src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "subw $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ subw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| // Immediate Subtraction |
| instruct subI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immIAddSub src2) %{ |
| match(Set dst (SubI src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "subw $dst, $src1, $src2" %} |
| |
| // use opcode to indicate that this is a sub not an add |
| opcode(0x1); |
| |
| ins_encode(aarch64_enc_addsubw_imm(dst, src1, src2)); |
| |
| ins_pipe(ialu_reg_imm); |
| %} |
| |
| // Long Subtraction |
| instruct subL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{ |
| |
| match(Set dst (SubL src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "sub $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| // Long Immediate Subtraction. No constant pool entries required. |
| instruct subL_reg_imm(iRegLNoSp dst, iRegL src1, immLAddSub src2) %{ |
| match(Set dst (SubL src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "sub$dst, $src1, $src2" %} |
| |
| // use opcode to indicate that this is a sub not an add |
| opcode(0x1); |
| |
| ins_encode( aarch64_enc_addsub_imm(dst, src1, src2) ); |
| |
| ins_pipe(ialu_reg_imm); |
| %} |
| |
| // Integer Negation (special case for sub) |
| |
| instruct negI_reg(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) %{ |
| match(Set dst (SubI zero src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "negw $dst, $src\t# int" %} |
| |
| ins_encode %{ |
| __ negw(as_Register($dst$$reg), |
| as_Register($src$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| // Long Negation |
| |
| instruct negL_reg(iRegLNoSp dst, iRegL src, immL0 zero, rFlagsReg cr) %{ |
| match(Set dst (SubL zero src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "neg $dst, $src\t# long" %} |
| |
| ins_encode %{ |
| __ neg(as_Register($dst$$reg), |
| as_Register($src$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| // Integer Multiply |
| |
| instruct mulI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (MulI src1 src2)); |
| |
| ins_cost(INSN_COST * 3); |
| format %{ "mulw $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ mulw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(imul_reg_reg); |
| %} |
| |
| instruct smulI(iRegLNoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (MulL (ConvI2L src1) (ConvI2L src2))); |
| |
| ins_cost(INSN_COST * 3); |
| format %{ "smull $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ smull(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(imul_reg_reg); |
| %} |
| |
| // Long Multiply |
| |
| instruct mulL(iRegLNoSp dst, iRegL src1, iRegL src2) %{ |
| match(Set dst (MulL src1 src2)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "mul $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ mul(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(lmul_reg_reg); |
| %} |
| |
| instruct mulHiL_rReg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) |
| %{ |
| match(Set dst (MulHiL src1 src2)); |
| |
| ins_cost(INSN_COST * 7); |
| format %{ "smulh $dst, $src1, $src2, \t# mulhi" %} |
| |
| ins_encode %{ |
| __ smulh(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(lmul_reg_reg); |
| %} |
| |
| // Combined Integer Multiply & Add/Sub |
| |
| instruct maddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{ |
| match(Set dst (AddI src3 (MulI src1 src2))); |
| |
| ins_cost(INSN_COST * 3); |
| format %{ "madd $dst, $src1, $src2, $src3" %} |
| |
| ins_encode %{ |
| __ maddw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| as_Register($src3$$reg)); |
| %} |
| |
| ins_pipe(imac_reg_reg); |
| %} |
| |
| instruct msubI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, iRegIorL2I src3) %{ |
| match(Set dst (SubI src3 (MulI src1 src2))); |
| |
| ins_cost(INSN_COST * 3); |
| format %{ "msub $dst, $src1, $src2, $src3" %} |
| |
| ins_encode %{ |
| __ msubw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| as_Register($src3$$reg)); |
| %} |
| |
| ins_pipe(imac_reg_reg); |
| %} |
| |
| // Combined Long Multiply & Add/Sub |
| |
| instruct maddL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{ |
| match(Set dst (AddL src3 (MulL src1 src2))); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "madd $dst, $src1, $src2, $src3" %} |
| |
| ins_encode %{ |
| __ madd(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| as_Register($src3$$reg)); |
| %} |
| |
| ins_pipe(lmac_reg_reg); |
| %} |
| |
| instruct msubL(iRegLNoSp dst, iRegL src1, iRegL src2, iRegL src3) %{ |
| match(Set dst (SubL src3 (MulL src1 src2))); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "msub $dst, $src1, $src2, $src3" %} |
| |
| ins_encode %{ |
| __ msub(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| as_Register($src3$$reg)); |
| %} |
| |
| ins_pipe(lmac_reg_reg); |
| %} |
| |
| // Integer Divide |
| |
| instruct divI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (DivI src1 src2)); |
| |
| ins_cost(INSN_COST * 19); |
| format %{ "sdivw $dst, $src1, $src2" %} |
| |
| ins_encode(aarch64_enc_divw(dst, src1, src2)); |
| ins_pipe(idiv_reg_reg); |
| %} |
| |
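| // The two rules below match the idiom C2 emits when dividing an int |
| // by 2 with Java's round-toward-zero semantics: x / 2 becomes |
| // (x + (x >>> 31)) >> 1, and (x >> 31) >>> 31 is simply x >>> 31. |
| // E.g. x = -7: x >>> 31 == 1, so (-7 + 1) >> 1 == -3 as required. |
| // (signExtractL/div2RoundL below are the 64-bit counterparts.) |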
| instruct signExtract(iRegINoSp dst, iRegIorL2I src1, immI_31 div1, immI_31 div2) %{ |
| match(Set dst (URShiftI (RShiftI src1 div1) div2)); |
| ins_cost(INSN_COST); |
| format %{ "lsrw $dst, $src1, $div1" %} |
| ins_encode %{ |
| __ lsrw(as_Register($dst$$reg), as_Register($src1$$reg), 31); |
| %} |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| instruct div2Round(iRegINoSp dst, iRegIorL2I src, immI_31 div1, immI_31 div2) %{ |
| match(Set dst (AddI src (URShiftI (RShiftI src div1) div2))); |
| ins_cost(INSN_COST); |
| format %{ "addw $dst, $src, LSR $div1" %} |
| |
| ins_encode %{ |
| __ addw(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| as_Register($src$$reg), |
| Assembler::LSR, 31); |
| %} |
| ins_pipe(ialu_reg); |
| %} |
| |
| // Long Divide |
| |
| instruct divL(iRegLNoSp dst, iRegL src1, iRegL src2) %{ |
| match(Set dst (DivL src1 src2)); |
| |
| ins_cost(INSN_COST * 35); |
| format %{ "sdiv $dst, $src1, $src2" %} |
| |
| ins_encode(aarch64_enc_div(dst, src1, src2)); |
| ins_pipe(ldiv_reg_reg); |
| %} |
| |
| instruct signExtractL(iRegLNoSp dst, iRegL src1, immI_63 div1, immI_63 div2) %{ |
| match(Set dst (URShiftL (RShiftL src1 div1) div2)); |
| ins_cost(INSN_COST); |
| format %{ "lsr $dst, $src1, $div1" %} |
| ins_encode %{ |
| __ lsr(as_Register($dst$$reg), as_Register($src1$$reg), 63); |
| %} |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| instruct div2RoundL(iRegLNoSp dst, iRegL src, immI_63 div1, immI_63 div2) %{ |
| match(Set dst (AddL src (URShiftL (RShiftL src div1) div2))); |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src, $div1" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| as_Register($src$$reg), |
| Assembler::LSR, 63); |
| %} |
| ins_pipe(ialu_reg); |
| %} |
| |
| // Integer Remainder |
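| // Java's % takes the sign of the dividend, which is exactly what |
| // sdiv+msub compute: a % b == a - (a / b) * b with / truncating |
| // toward zero, e.g. -7 % 3 == -7 - (-2 * 3) == -1 and |
| // 7 % -3 == 7 - (-2 * -3) == 1. |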
| |
| instruct modI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (ModI src1 src2)); |
| |
| ins_cost(INSN_COST * 22); |
| format %{ "sdivw rscratch1, $src1, $src2\n\t" |
| "msubw($dst, rscratch1, $src2, $src1" %} |
| |
| ins_encode(aarch64_enc_modw(dst, src1, src2)); |
| ins_pipe(idiv_reg_reg); |
| %} |
| |
| // Long Remainder |
| |
| instruct modL(iRegLNoSp dst, iRegL src1, iRegL src2) %{ |
| match(Set dst (ModL src1 src2)); |
| |
| ins_cost(INSN_COST * 38); |
| format %{ "sdiv rscratch1, $src1, $src2\n" |
| "msub($dst, rscratch1, $src2, $src1" %} |
| |
| ins_encode(aarch64_enc_mod(dst, src1, src2)); |
| ins_pipe(ldiv_reg_reg); |
| %} |
| |
| // Integer Shifts |
| |
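| // n.b. the variable shift instructions (lslv/lsrv/asrv and their |
| // 64-bit forms) use only the low 5 (resp. 6) bits of the shift |
| // register, which matches Java's shift semantics exactly, so no |
| // explicit masking of the count is needed; the immediate forms mask |
| // with & 0x1f / & 0x3f for the same reason. |
| |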
| // Shift Left Register |
| instruct lShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (LShiftI src1 src2)); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "lslvw $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ lslvw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg_vshift); |
| %} |
| |
| // Shift Left Immediate |
| instruct lShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{ |
| match(Set dst (LShiftI src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "lslw $dst, $src1, ($src2 & 0x1f)" %} |
| |
| ins_encode %{ |
| __ lslw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| $src2$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // Shift Right Logical Register |
| instruct urShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (URShiftI src1 src2)); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "lsrvw $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ lsrvw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg_vshift); |
| %} |
| |
| // Shift Right Logical Immediate |
| instruct urShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{ |
| match(Set dst (URShiftI src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "lsrw $dst, $src1, ($src2 & 0x1f)" %} |
| |
| ins_encode %{ |
| __ lsrw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| $src2$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // Shift Right Arithmetic Register |
| instruct rShiftI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (RShiftI src1 src2)); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "asrvw $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ asrvw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg_vshift); |
| %} |
| |
| // Shift Right Arithmetic Immediate |
| instruct rShiftI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immI src2) %{ |
| match(Set dst (RShiftI src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "asrw $dst, $src1, ($src2 & 0x1f)" %} |
| |
| ins_encode %{ |
| __ asrw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| $src2$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // Combined Int Mask and Right Shift (using UBFM) |
| // TODO |
| |
| // Long Shifts |
| |
| // Shift Left Register |
| instruct lShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{ |
| match(Set dst (LShiftL src1 src2)); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "lslv $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ lslv(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg_vshift); |
| %} |
| |
| // Shift Left Immediate |
| instruct lShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{ |
| match(Set dst (LShiftL src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "lsl $dst, $src1, ($src2 & 0x3f)" %} |
| |
| ins_encode %{ |
| __ lsl(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| $src2$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // Shift Right Logical Register |
| instruct urShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{ |
| match(Set dst (URShiftL src1 src2)); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "lsrv $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ lsrv(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg_vshift); |
| %} |
| |
| // Shift Right Logical Immediate |
| instruct urShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{ |
| match(Set dst (URShiftL src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "lsr $dst, $src1, ($src2 & 0x3f)" %} |
| |
| ins_encode %{ |
| __ lsr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| $src2$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // A special-case pattern for card table stores. |
| instruct urShiftP_reg_imm(iRegLNoSp dst, iRegP src1, immI src2) %{ |
| match(Set dst (URShiftL (CastP2X src1) src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "lsr $dst, p2x($src1), ($src2 & 0x3f)" %} |
| |
| ins_encode %{ |
| __ lsr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| $src2$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // Shift Right Arithmetic Register |
| instruct rShiftL_reg_reg(iRegLNoSp dst, iRegL src1, iRegIorL2I src2) %{ |
| match(Set dst (RShiftL src1 src2)); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "asrv $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ asrv(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg_vshift); |
| %} |
| |
| // Shift Right Arithmetic Immediate |
| instruct rShiftL_reg_imm(iRegLNoSp dst, iRegL src1, immI src2) %{ |
| match(Set dst (RShiftL src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "asr $dst, $src1, ($src2 & 0x3f)" %} |
| |
| ins_encode %{ |
| __ asr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| $src2$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // BEGIN This section of the file is automatically generated. Do not edit -------------- |
| |
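| // These rules fold a bitwise NOT (matched as an XOR with -1) into the |
| // combining instruction: AArch64 provides BIC (and-not), ORN (or-not) |
| // and EON (xor-not), each optionally shifting its second operand, so |
| // e.g. Java's a & ~(b >>> 3) maps onto a single bicw dst, a, b, LSR 3. |
| |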
| instruct regL_not_reg(iRegLNoSp dst, |
| iRegL src1, immL_M1 m1, |
| rFlagsReg cr) %{ |
| match(Set dst (XorL src1 m1)); |
| ins_cost(INSN_COST); |
| format %{ "eon $dst, $src1, zr" %} |
| |
| ins_encode %{ |
| __ eon(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| zr, |
| Assembler::LSL, 0); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| instruct regI_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, immI_M1 m1, |
| rFlagsReg cr) %{ |
| match(Set dst (XorI src1 m1)); |
| ins_cost(INSN_COST); |
| format %{ "eonw $dst, $src1, zr" %} |
| |
| ins_encode %{ |
| __ eonw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| zr, |
| Assembler::LSL, 0); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct AndI_reg_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1, |
| rFlagsReg cr) %{ |
| match(Set dst (AndI src1 (XorI src2 m1))); |
| ins_cost(INSN_COST); |
| format %{ "bicw $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ bicw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, 0); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct AndL_reg_not_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, immL_M1 m1, |
| rFlagsReg cr) %{ |
| match(Set dst (AndL src1 (XorL src2 m1))); |
| ins_cost(INSN_COST); |
| format %{ "bic $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ bic(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, 0); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct OrI_reg_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1, |
| rFlagsReg cr) %{ |
| match(Set dst (OrI src1 (XorI src2 m1))); |
| ins_cost(INSN_COST); |
| format %{ "ornw $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ ornw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, 0); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct OrL_reg_not_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, immL_M1 m1, |
| rFlagsReg cr) %{ |
| match(Set dst (OrL src1 (XorL src2 m1))); |
| ins_cost(INSN_COST); |
| format %{ "orn $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ orn(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, 0); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct XorI_reg_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, immI_M1 m1, |
| rFlagsReg cr) %{ |
| match(Set dst (XorI m1 (XorI src2 src1))); |
| ins_cost(INSN_COST); |
| format %{ "eonw $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ eonw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, 0); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct XorL_reg_not_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, immL_M1 m1, |
| rFlagsReg cr) %{ |
| match(Set dst (XorL m1 (XorL src2 src1))); |
| ins_cost(INSN_COST); |
| format %{ "eon $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ eon(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, 0); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct AndI_reg_URShift_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, immI_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (AndI src1 (XorI(URShiftI src2 src3) src4))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "bicw $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ bicw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AndL_reg_URShift_not_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, immL_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (AndL src1 (XorL(URShiftL src2 src3) src4))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "bic $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ bic(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AndI_reg_RShift_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, immI_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (AndI src1 (XorI(RShiftI src2 src3) src4))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "bicw $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ bicw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AndL_reg_RShift_not_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, immL_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (AndL src1 (XorL(RShiftL src2 src3) src4))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "bic $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ bic(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AndI_reg_LShift_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, immI_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (AndI src1 (XorI(LShiftI src2 src3) src4))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "bicw $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ bicw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AndL_reg_LShift_not_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, immL_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (AndL src1 (XorL(LShiftL src2 src3) src4))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "bic $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ bic(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct XorI_reg_URShift_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, immI_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (XorI src4 (XorI(URShiftI src2 src3) src1))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "eonw $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ eonw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct XorL_reg_URShift_not_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, immL_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (XorL src4 (XorL(URShiftL src2 src3) src1))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "eon $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ eon(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct XorI_reg_RShift_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, immI_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (XorI src4 (XorI(RShiftI src2 src3) src1))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "eonw $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ eonw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct XorL_reg_RShift_not_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, immL_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (XorL src4 (XorL(RShiftL src2 src3) src1))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "eon $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ eon(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct XorI_reg_LShift_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, immI_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (XorI src4 (XorI(LShiftI src2 src3) src1))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "eonw $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ eonw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct XorL_reg_LShift_not_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, immL_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (XorL src4 (XorL(LShiftL src2 src3) src1))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "eon $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ eon(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct OrI_reg_URShift_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, immI_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (OrI src1 (XorI(URShiftI src2 src3) src4))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "ornw $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ ornw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct OrL_reg_URShift_not_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, immL_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (OrL src1 (XorL(URShiftL src2 src3) src4))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "orn $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ orn(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct OrI_reg_RShift_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, immI_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (OrI src1 (XorI(RShiftI src2 src3) src4))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "ornw $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ ornw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct OrL_reg_RShift_not_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, immL_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (OrL src1 (XorL(RShiftL src2 src3) src4))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "orn $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ orn(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct OrI_reg_LShift_not_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, immI_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (OrI src1 (XorI(LShiftI src2 src3) src4))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "ornw $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ ornw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct OrL_reg_LShift_not_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, immL_M1 src4, rFlagsReg cr) %{ |
| match(Set dst (OrL src1 (XorL(LShiftL src2 src3) src4))); |
| ins_cost(1.9 * INSN_COST); |
| format %{ "orn $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ orn(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AndI_reg_URShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (AndI src1 (URShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "andw $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ andw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AndL_reg_URShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (AndL src1 (URShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "andr $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ andr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AndI_reg_RShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (AndI src1 (RShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "andw $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ andw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AndL_reg_RShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (AndL src1 (RShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "andr $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ andr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AndI_reg_LShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (AndI src1 (LShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "andw $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ andw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AndL_reg_LShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (AndL src1 (LShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "andr $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ andr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct XorI_reg_URShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (XorI src1 (URShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "eorw $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ eorw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct XorL_reg_URShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (XorL src1 (URShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "eor $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ eor(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct XorI_reg_RShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (XorI src1 (RShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "eorw $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ eorw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct XorL_reg_RShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (XorL src1 (RShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "eor $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ eor(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct XorI_reg_LShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (XorI src1 (LShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "eorw $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ eorw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct XorL_reg_LShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (XorL src1 (LShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "eor $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ eor(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct OrI_reg_URShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (OrI src1 (URShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "orrw $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ orrw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct OrL_reg_URShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (OrL src1 (URShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "orr $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ orr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct OrI_reg_RShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (OrI src1 (RShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "orrw $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ orrw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct OrL_reg_RShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (OrL src1 (RShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "orr $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ orr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct OrI_reg_LShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (OrI src1 (LShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "orrw $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ orrw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct OrL_reg_LShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (OrL src1 (LShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "orr $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ orr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddI_reg_URShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (AddI src1 (URShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "addw $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ addw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddL_reg_URShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (AddL src1 (URShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "add $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddI_reg_RShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (AddI src1 (RShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "addw $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ addw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddL_reg_RShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (AddL src1 (RShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "add $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddI_reg_LShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (AddI src1 (LShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "addw $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ addw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddL_reg_LShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (AddL src1 (LShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "add $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubI_reg_URShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (SubI src1 (URShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "subw $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ subw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubL_reg_URShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (SubL src1 (URShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "sub $dst, $src1, $src2, LSR $src3" %} |
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubI_reg_RShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (SubI src1 (RShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "subw $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ subw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubL_reg_RShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (SubL src1 (RShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "sub $dst, $src1, $src2, ASR $src3" %} |
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::ASR, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubI_reg_LShift_reg(iRegINoSp dst, |
| iRegIorL2I src1, iRegIorL2I src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (SubI src1 (LShiftI src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "subw $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ subw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x1f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubL_reg_LShift_reg(iRegLNoSp dst, |
| iRegL src1, iRegL src2, |
| immI src3, rFlagsReg cr) %{ |
| match(Set dst (SubL src1 (LShiftL src2 src3))); |
| |
| ins_cost(1.9 * INSN_COST); |
| format %{ "sub $dst, $src1, $src2, LSL $src3" %} |
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LSL, |
| $src3$$constant & 0x3f); |
| %} |
| |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| |
| |
| // Shift Left followed by Shift Right. |
| // This idiom is used by the compiler for the i2b bytecode etc. |
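// For example (illustrative): the i2b conversion (byte)x compiles to
// (x << 24) >> 24, which matches sbfmwI with lshift = rshift = 24, giving
// r = (24 - 24) & 31 = 0 and s = 31 - 24 = 7, i.e. sbfmw dst, src, #0, #7,
// the same encoding as sxtb.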
| instruct sbfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count) |
| %{ |
| match(Set dst (RShiftL (LShiftL src lshift_count) rshift_count)); |
| // Make sure we are not going to exceed what sbfm can do. |
| predicate((unsigned int)n->in(2)->get_int() <= 63 |
| && (unsigned int)n->in(1)->in(2)->get_int() <= 63); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "sbfm $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %} |
| ins_encode %{ |
| int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant; |
| int s = 63 - lshift; |
| int r = (rshift - lshift) & 63; |
| __ sbfm(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| r, s); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // Shift Left followed by Shift Right. |
| // This idiom is used by the compiler for the i2b bytecode etc. |
| instruct sbfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count) |
| %{ |
| match(Set dst (RShiftI (LShiftI src lshift_count) rshift_count)); |
| // Make sure we are not going to exceed what sbfmw can do. |
| predicate((unsigned int)n->in(2)->get_int() <= 31 |
| && (unsigned int)n->in(1)->in(2)->get_int() <= 31); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "sbfmw $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %} |
| ins_encode %{ |
| int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant; |
| int s = 31 - lshift; |
| int r = (rshift - lshift) & 31; |
| __ sbfmw(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| r, s); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // Shift Left followed by Shift Right. |
| // This idiom is used by the compiler for the i2b bytecode etc. |
| instruct ubfmL(iRegLNoSp dst, iRegL src, immI lshift_count, immI rshift_count) |
| %{ |
| match(Set dst (URShiftL (LShiftL src lshift_count) rshift_count)); |
| // Make sure we are not going to exceed what ubfm can do. |
| predicate((unsigned int)n->in(2)->get_int() <= 63 |
| && (unsigned int)n->in(1)->in(2)->get_int() <= 63); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "ubfm $dst, $src, $rshift_count - $lshift_count, #63 - $lshift_count" %} |
| ins_encode %{ |
| int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant; |
| int s = 63 - lshift; |
| int r = (rshift - lshift) & 63; |
| __ ubfm(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| r, s); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // Shift Left followed by Shift Right. |
| // This idiom is used by the compiler for the i2b bytecode etc. |
| instruct ubfmwI(iRegINoSp dst, iRegIorL2I src, immI lshift_count, immI rshift_count) |
| %{ |
| match(Set dst (URShiftI (LShiftI src lshift_count) rshift_count)); |
| // Make sure we are not going to exceed what ubfmw can do. |
| predicate((unsigned int)n->in(2)->get_int() <= 31 |
| && (unsigned int)n->in(1)->in(2)->get_int() <= 31); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "ubfmw $dst, $src, $rshift_count - $lshift_count, #31 - $lshift_count" %} |
| ins_encode %{ |
| int lshift = $lshift_count$$constant, rshift = $rshift_count$$constant; |
| int s = 31 - lshift; |
| int r = (rshift - lshift) & 31; |
| __ ubfmw(as_Register($dst$$reg), |
| as_Register($src$$reg), |
| r, s); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| // Bitfield extract with shift & mask |
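// For example (illustrative): (x >>> 7) & 0x7f matches ubfxwI with
// rshift = 7 and mask = 0x7f, so width = exact_log2(0x7f + 1) = 7 and a
// single ubfxw dst, src, #7, #7 extracts bits 7..13.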
| |
| instruct ubfxwI(iRegINoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask) |
| %{ |
| match(Set dst (AndI (URShiftI src rshift) mask)); |
| |
| ins_cost(INSN_COST); |
| format %{ "ubfxw $dst, $src, $rshift, $mask" %} |
| ins_encode %{ |
| int rshift = $rshift$$constant; |
| long mask = $mask$$constant; |
| int width = exact_log2(mask+1); |
| __ ubfxw(as_Register($dst$$reg), |
| as_Register($src$$reg), rshift, width); |
| %} |
| ins_pipe(ialu_reg_shift); |
| %} |
| instruct ubfxL(iRegLNoSp dst, iRegL src, immI rshift, immL_bitmask mask) |
| %{ |
| match(Set dst (AndL (URShiftL src rshift) mask)); |
| |
| ins_cost(INSN_COST); |
| format %{ "ubfx $dst, $src, $rshift, $mask" %} |
| ins_encode %{ |
| int rshift = $rshift$$constant; |
| long mask = $mask$$constant; |
| int width = exact_log2(mask+1); |
| __ ubfx(as_Register($dst$$reg), |
| as_Register($src$$reg), rshift, width); |
| %} |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
// We can use ubfx when zero-extending an AndI with a mask, provided the
// mask is positive; immI_bitmask guarantees that it is.
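// For example (illustrative): (long)((x >>> 3) & 0xff) matches here; the
// positive mask means the int result is already zero-extended, so one
// ubfx dst, src, #3, #8 performs both the extract and the i2l.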
| instruct ubfxIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI rshift, immI_bitmask mask) |
| %{ |
| match(Set dst (ConvI2L (AndI (URShiftI src rshift) mask))); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "ubfx $dst, $src, $rshift, $mask" %} |
| ins_encode %{ |
| int rshift = $rshift$$constant; |
| long mask = $mask$$constant; |
| int width = exact_log2(mask+1); |
| __ ubfx(as_Register($dst$$reg), |
| as_Register($src$$reg), rshift, width); |
| %} |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // We can use ubfiz when masking by a positive number and then left shifting the result. |
| // We know that the mask is positive because immI_bitmask guarantees it. |
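// For example (illustrative): (x & 0xff) << 3 matches ubfizwI with
// lshift = 3 and mask = 0xff, so width = 8 and the result is
// ubfizw dst, src, #3, #8.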
| instruct ubfizwI(iRegINoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask) |
| %{ |
| match(Set dst (LShiftI (AndI src mask) lshift)); |
| predicate((unsigned int)n->in(2)->get_int() <= 31 && |
| (exact_log2(n->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= (31+1)); |
| |
| ins_cost(INSN_COST); |
| format %{ "ubfizw $dst, $src, $lshift, $mask" %} |
| ins_encode %{ |
| int lshift = $lshift$$constant; |
| long mask = $mask$$constant; |
| int width = exact_log2(mask+1); |
| __ ubfizw(as_Register($dst$$reg), |
| as_Register($src$$reg), lshift, width); |
| %} |
| ins_pipe(ialu_reg_shift); |
| %} |
| // We can use ubfiz when masking by a positive number and then left shifting the result. |
| // We know that the mask is positive because immL_bitmask guarantees it. |
| instruct ubfizL(iRegLNoSp dst, iRegL src, immI lshift, immL_bitmask mask) |
| %{ |
| match(Set dst (LShiftL (AndL src mask) lshift)); |
| predicate((unsigned int)n->in(2)->get_int() <= 63 && |
| (exact_log2_long(n->in(1)->in(2)->get_long()+1) + (unsigned int)n->in(2)->get_int()) <= (63+1)); |
| |
| ins_cost(INSN_COST); |
| format %{ "ubfiz $dst, $src, $lshift, $mask" %} |
| ins_encode %{ |
| int lshift = $lshift$$constant; |
| long mask = $mask$$constant; |
| int width = exact_log2(mask+1); |
| __ ubfiz(as_Register($dst$$reg), |
| as_Register($src$$reg), lshift, width); |
| %} |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
// If there is a ConvI2L node between an AndI and a LShiftL, we can also match ubfiz
| instruct ubfizIConvI2L(iRegLNoSp dst, iRegIorL2I src, immI lshift, immI_bitmask mask) |
| %{ |
| match(Set dst (LShiftL (ConvI2L(AndI src mask)) lshift)); |
| predicate((unsigned int)n->in(2)->get_int() <= 31 && |
| (exact_log2((unsigned int)n->in(1)->in(1)->in(2)->get_int()+1) + (unsigned int)n->in(2)->get_int()) <= 32); |
| |
| ins_cost(INSN_COST); |
| format %{ "ubfiz $dst, $src, $lshift, $mask" %} |
| ins_encode %{ |
| int lshift = $lshift$$constant; |
| long mask = $mask$$constant; |
| int width = exact_log2(mask+1); |
| __ ubfiz(as_Register($dst$$reg), |
| as_Register($src$$reg), lshift, width); |
| %} |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // Rotations |
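//
// extr computes (src1 << (size - rshift)) | (src2 >>> rshift), so when the
// two constant shift counts sum to the operand size (which is what the
// predicates below check) the Or/Add of two opposite shifts collapses to
// one extr; with src1 == src2 this is a rotate right, e.g. (illustrative)
// Long.rotateRight(x, n) for constant n.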
| |
| instruct extrOrL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr) |
| %{ |
| match(Set dst (OrL (LShiftL src1 lshift) (URShiftL src2 rshift))); |
| predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63)); |
| |
| ins_cost(INSN_COST); |
| format %{ "extr $dst, $src1, $src2, #$rshift" %} |
| |
| ins_encode %{ |
| __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg), |
| $rshift$$constant & 63); |
| %} |
| ins_pipe(ialu_reg_reg_extr); |
| %} |
| |
| instruct extrOrI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr) |
| %{ |
| match(Set dst (OrI (LShiftI src1 lshift) (URShiftI src2 rshift))); |
| predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31)); |
| |
| ins_cost(INSN_COST); |
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}
| |
| ins_encode %{ |
| __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg), |
| $rshift$$constant & 31); |
| %} |
| ins_pipe(ialu_reg_reg_extr); |
| %} |
| |
| instruct extrAddL(iRegLNoSp dst, iRegL src1, iRegL src2, immI lshift, immI rshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL (LShiftL src1 lshift) (URShiftL src2 rshift))); |
| predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 63)); |
| |
| ins_cost(INSN_COST); |
| format %{ "extr $dst, $src1, $src2, #$rshift" %} |
| |
| ins_encode %{ |
| __ extr(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg), |
| $rshift$$constant & 63); |
| %} |
| ins_pipe(ialu_reg_reg_extr); |
| %} |
| |
| instruct extrAddI(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI lshift, immI rshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddI (LShiftI src1 lshift) (URShiftI src2 rshift))); |
| predicate(0 == ((n->in(1)->in(2)->get_int() + n->in(2)->in(2)->get_int()) & 31)); |
| |
| ins_cost(INSN_COST); |
  format %{ "extrw $dst, $src1, $src2, #$rshift" %}
| |
| ins_encode %{ |
| __ extrw(as_Register($dst$$reg), as_Register($src1$$reg), as_Register($src2$$reg), |
| $rshift$$constant & 31); |
| %} |
| ins_pipe(ialu_reg_reg_extr); |
| %} |
| |
| |
| // rol expander |
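//
// There is no rol instruction; rotate left by s is encoded as rotate right
// by (0 - s), which rorv reduces mod the operand size. That is why the
// expanders negate the shift with subw from zr before the rorv.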
| |
| instruct rolL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr) |
| %{ |
| effect(DEF dst, USE src, USE shift); |
| |
| format %{ "rol $dst, $src, $shift" %} |
| ins_cost(INSN_COST * 3); |
| ins_encode %{ |
| __ subw(rscratch1, zr, as_Register($shift$$reg)); |
| __ rorv(as_Register($dst$$reg), as_Register($src$$reg), |
| rscratch1); |
| %} |
| ins_pipe(ialu_reg_reg_vshift); |
| %} |
| |
| // rol expander |
| |
| instruct rolI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr) |
| %{ |
| effect(DEF dst, USE src, USE shift); |
| |
| format %{ "rol $dst, $src, $shift" %} |
| ins_cost(INSN_COST * 3); |
| ins_encode %{ |
| __ subw(rscratch1, zr, as_Register($shift$$reg)); |
| __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), |
| rscratch1); |
| %} |
| ins_pipe(ialu_reg_reg_vshift); |
| %} |
| |
| instruct rolL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr) |
| %{ |
| match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c_64 shift)))); |
| |
| expand %{ |
| rolL_rReg(dst, src, shift, cr); |
| %} |
| %} |
| |
| instruct rolL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr) |
| %{ |
| match(Set dst (OrL (LShiftL src shift) (URShiftL src (SubI c0 shift)))); |
| |
| expand %{ |
| rolL_rReg(dst, src, shift, cr); |
| %} |
| %} |
| |
| instruct rolI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr) |
| %{ |
| match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c_32 shift)))); |
| |
| expand %{ |
| rolI_rReg(dst, src, shift, cr); |
| %} |
| %} |
| |
| instruct rolI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr) |
| %{ |
| match(Set dst (OrI (LShiftI src shift) (URShiftI src (SubI c0 shift)))); |
| |
| expand %{ |
| rolI_rReg(dst, src, shift, cr); |
| %} |
| %} |
| |
| // ror expander |
| |
| instruct rorL_rReg(iRegLNoSp dst, iRegL src, iRegI shift, rFlagsReg cr) |
| %{ |
| effect(DEF dst, USE src, USE shift); |
| |
| format %{ "ror $dst, $src, $shift" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ rorv(as_Register($dst$$reg), as_Register($src$$reg), |
| as_Register($shift$$reg)); |
| %} |
| ins_pipe(ialu_reg_reg_vshift); |
| %} |
| |
| // ror expander |
| |
| instruct rorI_rReg(iRegINoSp dst, iRegI src, iRegI shift, rFlagsReg cr) |
| %{ |
| effect(DEF dst, USE src, USE shift); |
| |
| format %{ "ror $dst, $src, $shift" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ rorvw(as_Register($dst$$reg), as_Register($src$$reg), |
| as_Register($shift$$reg)); |
| %} |
| ins_pipe(ialu_reg_reg_vshift); |
| %} |
| |
| instruct rorL_rReg_Var_C_64(iRegLNoSp dst, iRegL src, iRegI shift, immI_64 c_64, rFlagsReg cr) |
| %{ |
| match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c_64 shift)))); |
| |
| expand %{ |
| rorL_rReg(dst, src, shift, cr); |
| %} |
| %} |
| |
| instruct rorL_rReg_Var_C0(iRegLNoSp dst, iRegL src, iRegI shift, immI0 c0, rFlagsReg cr) |
| %{ |
| match(Set dst (OrL (URShiftL src shift) (LShiftL src (SubI c0 shift)))); |
| |
| expand %{ |
| rorL_rReg(dst, src, shift, cr); |
| %} |
| %} |
| |
| instruct rorI_rReg_Var_C_32(iRegINoSp dst, iRegI src, iRegI shift, immI_32 c_32, rFlagsReg cr) |
| %{ |
| match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c_32 shift)))); |
| |
| expand %{ |
| rorI_rReg(dst, src, shift, cr); |
| %} |
| %} |
| |
| instruct rorI_rReg_Var_C0(iRegINoSp dst, iRegI src, iRegI shift, immI0 c0, rFlagsReg cr) |
| %{ |
| match(Set dst (OrI (URShiftI src shift) (LShiftI src (SubI c0 shift)))); |
| |
| expand %{ |
| rorI_rReg(dst, src, shift, cr); |
| %} |
| %} |
| |
| // Add/subtract (extended) |
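//
// These rules fold a sign- or zero-extension of the second operand into
// the arithmetic op. For example (illustrative): l + (long)i matches
// AddExtI and emits add dst, src1, src2, sxtw, avoiding a separate sxtw.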
| |
| instruct AddExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (ConvI2L src2))); |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2, sxtw" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtw); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %}; |
| |
| instruct SubExtI(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, rFlagsReg cr) |
| %{ |
| match(Set dst (SubL src1 (ConvI2L src2))); |
| ins_cost(INSN_COST); |
| format %{ "sub $dst, $src1, $src2, sxtw" %} |
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtw); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %}; |
| |
| |
| instruct AddExtI_sxth(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_16 lshift, immI_16 rshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift))); |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2, sxth" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxth); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct AddExtI_sxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddI src1 (RShiftI (LShiftI src2 lshift) rshift))); |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2, sxtb" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtb); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct AddExtI_uxtb(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_24 lshift, immI_24 rshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddI src1 (URShiftI (LShiftI src2 lshift) rshift))); |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2, uxtb" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtb); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct AddExtL_sxth(iRegLNoSp dst, iRegL src1, iRegL src2, immI_48 lshift, immI_48 rshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift))); |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2, sxth" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxth); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct AddExtL_sxtw(iRegLNoSp dst, iRegL src1, iRegL src2, immI_32 lshift, immI_32 rshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift))); |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2, sxtw" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtw); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct AddExtL_sxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (RShiftL (LShiftL src2 lshift) rshift))); |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2, sxtb" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtb); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct AddExtL_uxtb(iRegLNoSp dst, iRegL src1, iRegL src2, immI_56 lshift, immI_56 rshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (URShiftL (LShiftL src2 lshift) rshift))); |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2, uxtb" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtb); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| |
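// Masking with 0xff / 0xffff / 0xffffffff is a zero-extension, so an And
// feeding an Add/Sub folds into the extended-register form. For example
// (illustrative): a + (b & 0xff) matches AddExtI_uxtb_and and emits
// addw dst, a, b, uxtb.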
| instruct AddExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr) |
| %{ |
| match(Set dst (AddI src1 (AndI src2 mask))); |
| ins_cost(INSN_COST); |
| format %{ "addw $dst, $src1, $src2, uxtb" %} |
| |
| ins_encode %{ |
| __ addw(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtb); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct AddExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr) |
| %{ |
| match(Set dst (AddI src1 (AndI src2 mask))); |
| ins_cost(INSN_COST); |
| format %{ "addw $dst, $src1, $src2, uxth" %} |
| |
| ins_encode %{ |
| __ addw(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxth); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct AddExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (AndL src2 mask))); |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2, uxtb" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtb); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct AddExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (AndL src2 mask))); |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2, uxth" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxth); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct AddExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (AndL src2 mask))); |
| ins_cost(INSN_COST); |
| format %{ "add $dst, $src1, $src2, uxtw" %} |
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtw); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct SubExtI_uxtb_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, rFlagsReg cr) |
| %{ |
| match(Set dst (SubI src1 (AndI src2 mask))); |
| ins_cost(INSN_COST); |
| format %{ "subw $dst, $src1, $src2, uxtb" %} |
| |
| ins_encode %{ |
| __ subw(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtb); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct SubExtI_uxth_and(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, rFlagsReg cr) |
| %{ |
| match(Set dst (SubI src1 (AndI src2 mask))); |
| ins_cost(INSN_COST); |
| format %{ "subw $dst, $src1, $src2, uxth" %} |
| |
| ins_encode %{ |
| __ subw(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxth); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct SubExtL_uxtb_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, rFlagsReg cr) |
| %{ |
| match(Set dst (SubL src1 (AndL src2 mask))); |
| ins_cost(INSN_COST); |
| format %{ "sub $dst, $src1, $src2, uxtb" %} |
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtb); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct SubExtL_uxth_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, rFlagsReg cr) |
| %{ |
| match(Set dst (SubL src1 (AndL src2 mask))); |
| ins_cost(INSN_COST); |
| format %{ "sub $dst, $src1, $src2, uxth" %} |
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxth); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct SubExtL_uxtw_and(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, rFlagsReg cr) |
| %{ |
| match(Set dst (SubL src1 (AndL src2 mask))); |
| ins_cost(INSN_COST); |
| format %{ "sub $dst, $src1, $src2, uxtw" %} |
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtw); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| |
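// The rules below additionally fold a constant left shift into the
// extend. For example (illustrative): l1 + (((l2 << 56) >> 56) << 2)
// emits add dst, src1, src2, sxtb #2.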
| instruct AddExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "add $dst, $src1, $src2, sxtb #$lshift2" %}
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "add $dst, $src1, $src2, sxth #$lshift2" %}
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxth, ($lshift2$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "add $dst, $src1, $src2, sxtw #$lshift2" %}
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubExtL_sxtb_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_56 lshift1, immI_56 rshift1, rFlagsReg cr) |
| %{ |
| match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "sub $dst, $src1, $src2, sxtb #$lshift2" %}
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubExtL_sxth_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_48 lshift1, immI_48 rshift1, rFlagsReg cr) |
| %{ |
| match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "sub $dst, $src1, $src2, sxth #$lshift2" %}
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxth, ($lshift2$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubExtL_sxtw_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immIExt lshift2, immI_32 lshift1, immI_32 rshift1, rFlagsReg cr) |
| %{ |
| match(Set dst (SubL src1 (LShiftL (RShiftL (LShiftL src2 lshift1) rshift1) lshift2))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "sub $dst, $src1, $src2, sxtw #$lshift2" %}
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtw, ($lshift2$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr) |
| %{ |
| match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "addw $dst, $src1, $src2, sxtb #$lshift2" %}
| |
| ins_encode %{ |
| __ addw(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr) |
| %{ |
| match(Set dst (AddI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "addw $dst, $src1, $src2, sxth #$lshift2" %}
| |
| ins_encode %{ |
| __ addw(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxth, ($lshift2$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubExtI_sxtb_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_24 lshift1, immI_24 rshift1, rFlagsReg cr) |
| %{ |
| match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "subw $dst, $src1, $src2, sxtb #$lshift2" %}
| |
| ins_encode %{ |
| __ subw(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtb, ($lshift2$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubExtI_sxth_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immIExt lshift2, immI_16 lshift1, immI_16 rshift1, rFlagsReg cr) |
| %{ |
| match(Set dst (SubI src1 (LShiftI (RShiftI (LShiftI src2 lshift1) rshift1) lshift2))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "subw $dst, $src1, $src2, sxth #$lshift2" %}
| |
| ins_encode %{ |
| __ subw(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxth, ($lshift2$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| |
| instruct AddExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (LShiftL (ConvI2L src2) lshift))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "add $dst, $src1, $src2, sxtw #$lshift" %}
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtw, ($lshift$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %}; |
| |
| instruct SubExtI_shift(iRegLNoSp dst, iRegL src1, iRegIorL2I src2, immIExt lshift, rFlagsReg cr) |
| %{ |
| match(Set dst (SubL src1 (LShiftL (ConvI2L src2) lshift))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "sub $dst, $src1, $src2, sxtw #$lshift" %}
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::sxtw, ($lshift$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %}; |
| |
| |
| instruct AddExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "add $dst, $src1, $src2, uxtb #$lshift" %}
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtb, ($lshift$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "add $dst, $src1, $src2, uxth #$lshift" %}
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxth, ($lshift$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddL src1 (LShiftL (AndL src2 mask) lshift))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "add $dst, $src1, $src2, uxtw #$lshift" %}
| |
| ins_encode %{ |
| __ add(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtw, ($lshift$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubExtL_uxtb_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_255 mask, immIExt lshift, rFlagsReg cr) |
| %{ |
| match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "sub $dst, $src1, $src2, uxtb #$lshift" %}
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtb, ($lshift$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubExtL_uxth_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_65535 mask, immIExt lshift, rFlagsReg cr) |
| %{ |
| match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "sub $dst, $src1, $src2, uxth #$lshift" %}
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxth, ($lshift$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubExtL_uxtw_and_shift(iRegLNoSp dst, iRegL src1, iRegL src2, immL_4294967295 mask, immIExt lshift, rFlagsReg cr) |
| %{ |
| match(Set dst (SubL src1 (LShiftL (AndL src2 mask) lshift))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "sub $dst, $src1, $src2, uxtw #$lshift" %}
| |
| ins_encode %{ |
| __ sub(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtw, ($lshift$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "addw $dst, $src1, $src2, uxtb #$lshift" %}
| |
| ins_encode %{ |
| __ addw(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtb, ($lshift$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct AddExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr) |
| %{ |
| match(Set dst (AddI src1 (LShiftI (AndI src2 mask) lshift))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "addw $dst, $src1, $src2, uxth #$lshift" %}
| |
| ins_encode %{ |
| __ addw(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxth, ($lshift$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubExtI_uxtb_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_255 mask, immIExt lshift, rFlagsReg cr) |
| %{ |
| match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "subw $dst, $src1, $src2, uxtb #$lshift" %}
| |
| ins_encode %{ |
| __ subw(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxtb, ($lshift$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| |
| instruct SubExtI_uxth_and_shift(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, immI_65535 mask, immIExt lshift, rFlagsReg cr) |
| %{ |
| match(Set dst (SubI src1 (LShiftI (AndI src2 mask) lshift))); |
| ins_cost(1.9 * INSN_COST); |
  format %{ "subw $dst, $src1, $src2, uxth #$lshift" %}
| |
| ins_encode %{ |
| __ subw(as_Register($dst$$reg), as_Register($src1$$reg), |
| as_Register($src2$$reg), ext::uxth, ($lshift$$constant)); |
| %} |
| ins_pipe(ialu_reg_reg_shift); |
| %} |
| // END This section of the file is automatically generated. Do not edit -------------- |
| |
| // ============================================================================ |
| // Floating Point Arithmetic Instructions |
| |
| instruct addF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{ |
| match(Set dst (AddF src1 src2)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "fadds $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ fadds(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| |
| ins_pipe(fp_dop_reg_reg_s); |
| %} |
| |
| instruct addD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{ |
| match(Set dst (AddD src1 src2)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "faddd $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ faddd(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| |
| ins_pipe(fp_dop_reg_reg_d); |
| %} |
| |
| instruct subF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{ |
| match(Set dst (SubF src1 src2)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "fsubs $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ fsubs(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| |
| ins_pipe(fp_dop_reg_reg_s); |
| %} |
| |
| instruct subD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{ |
| match(Set dst (SubD src1 src2)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "fsubd $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ fsubd(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| |
| ins_pipe(fp_dop_reg_reg_d); |
| %} |
| |
| instruct mulF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{ |
| match(Set dst (MulF src1 src2)); |
| |
| ins_cost(INSN_COST * 6); |
| format %{ "fmuls $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ fmuls(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| |
| ins_pipe(fp_dop_reg_reg_s); |
| %} |
| |
| instruct mulD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{ |
| match(Set dst (MulD src1 src2)); |
| |
| ins_cost(INSN_COST * 6); |
| format %{ "fmuld $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ fmuld(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| |
| ins_pipe(fp_dop_reg_reg_d); |
| %} |
| |
| // src1 * src2 + src3 |
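// For example (illustrative): Math.fma(a, b, c) on floats is intrinsified
// to (FmaF c (Binary a b)) and, when UseFMA is set, matches here as a
// single fmadds dst, a, b, c.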
| instruct maddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{ |
| predicate(UseFMA); |
| match(Set dst (FmaF src3 (Binary src1 src2))); |
| |
| format %{ "fmadds $dst, $src1, $src2, $src3" %} |
| |
| ins_encode %{ |
| __ fmadds(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg), |
| as_FloatRegister($src3$$reg)); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| // src1 * src2 + src3 |
| instruct maddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{ |
| predicate(UseFMA); |
| match(Set dst (FmaD src3 (Binary src1 src2))); |
| |
| format %{ "fmaddd $dst, $src1, $src2, $src3" %} |
| |
| ins_encode %{ |
| __ fmaddd(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg), |
| as_FloatRegister($src3$$reg)); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| // -src1 * src2 + src3 |
| instruct msubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{ |
| predicate(UseFMA); |
| match(Set dst (FmaF src3 (Binary (NegF src1) src2))); |
| match(Set dst (FmaF src3 (Binary src1 (NegF src2)))); |
| |
| format %{ "fmsubs $dst, $src1, $src2, $src3" %} |
| |
| ins_encode %{ |
| __ fmsubs(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg), |
| as_FloatRegister($src3$$reg)); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| // -src1 * src2 + src3 |
| instruct msubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{ |
| predicate(UseFMA); |
| match(Set dst (FmaD src3 (Binary (NegD src1) src2))); |
| match(Set dst (FmaD src3 (Binary src1 (NegD src2)))); |
| |
| format %{ "fmsubd $dst, $src1, $src2, $src3" %} |
| |
| ins_encode %{ |
| __ fmsubd(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg), |
| as_FloatRegister($src3$$reg)); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| // -src1 * src2 - src3 |
| instruct mnaddF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3) %{ |
| predicate(UseFMA); |
| match(Set dst (FmaF (NegF src3) (Binary (NegF src1) src2))); |
| match(Set dst (FmaF (NegF src3) (Binary src1 (NegF src2)))); |
| |
| format %{ "fnmadds $dst, $src1, $src2, $src3" %} |
| |
| ins_encode %{ |
| __ fnmadds(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg), |
| as_FloatRegister($src3$$reg)); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| // -src1 * src2 - src3 |
| instruct mnaddD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3) %{ |
| predicate(UseFMA); |
| match(Set dst (FmaD (NegD src3) (Binary (NegD src1) src2))); |
| match(Set dst (FmaD (NegD src3) (Binary src1 (NegD src2)))); |
| |
| format %{ "fnmaddd $dst, $src1, $src2, $src3" %} |
| |
| ins_encode %{ |
| __ fnmaddd(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg), |
| as_FloatRegister($src3$$reg)); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| // src1 * src2 - src3 |
| instruct mnsubF_reg_reg(vRegF dst, vRegF src1, vRegF src2, vRegF src3, immF0 zero) %{ |
| predicate(UseFMA); |
| match(Set dst (FmaF (NegF src3) (Binary src1 src2))); |
| |
| format %{ "fnmsubs $dst, $src1, $src2, $src3" %} |
| |
| ins_encode %{ |
| __ fnmsubs(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg), |
| as_FloatRegister($src3$$reg)); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| // src1 * src2 - src3 |
| instruct mnsubD_reg_reg(vRegD dst, vRegD src1, vRegD src2, vRegD src3, immD0 zero) %{ |
| predicate(UseFMA); |
| match(Set dst (FmaD (NegD src3) (Binary src1 src2))); |
| |
| format %{ "fnmsubd $dst, $src1, $src2, $src3" %} |
| |
| ins_encode %{ |
| // n.b. insn name should be fnmsubd |
| __ fnmsub(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg), |
| as_FloatRegister($src3$$reg)); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| |
| instruct divF_reg_reg(vRegF dst, vRegF src1, vRegF src2) %{ |
| match(Set dst (DivF src1 src2)); |
| |
| ins_cost(INSN_COST * 18); |
| format %{ "fdivs $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ fdivs(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| |
| ins_pipe(fp_div_s); |
| %} |
| |
| instruct divD_reg_reg(vRegD dst, vRegD src1, vRegD src2) %{ |
| match(Set dst (DivD src1 src2)); |
| |
| ins_cost(INSN_COST * 32); |
| format %{ "fdivd $dst, $src1, $src2" %} |
| |
| ins_encode %{ |
| __ fdivd(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| |
| ins_pipe(fp_div_d); |
| %} |
| |
| instruct negF_reg_reg(vRegF dst, vRegF src) %{ |
| match(Set dst (NegF src)); |
| |
| ins_cost(INSN_COST * 3); |
  format %{ "fnegs $dst, $src" %}
| |
| ins_encode %{ |
| __ fnegs(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_uop_s); |
| %} |
| |
| instruct negD_reg_reg(vRegD dst, vRegD src) %{ |
| match(Set dst (NegD src)); |
| |
| ins_cost(INSN_COST * 3); |
| format %{ "fnegd $dst, $src" %} |
| |
| ins_encode %{ |
| __ fnegd(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_uop_d); |
| %} |
| |
| instruct absF_reg(vRegF dst, vRegF src) %{ |
| match(Set dst (AbsF src)); |
| |
| ins_cost(INSN_COST * 3); |
| format %{ "fabss $dst, $src" %} |
| ins_encode %{ |
| __ fabss(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_uop_s); |
| %} |
| |
| instruct absD_reg(vRegD dst, vRegD src) %{ |
| match(Set dst (AbsD src)); |
| |
| ins_cost(INSN_COST * 3); |
| format %{ "fabsd $dst, $src" %} |
| ins_encode %{ |
| __ fabsd(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_uop_d); |
| %} |
| |
| instruct sqrtD_reg(vRegD dst, vRegD src) %{ |
| match(Set dst (SqrtD src)); |
| |
| ins_cost(INSN_COST * 50); |
| format %{ "fsqrtd $dst, $src" %} |
| ins_encode %{ |
| __ fsqrtd(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_div_d); |
| %} |
| |
| instruct sqrtF_reg(vRegF dst, vRegF src) %{ |
| match(Set dst (ConvD2F (SqrtD (ConvF2D src)))); |
| |
| ins_cost(INSN_COST * 50); |
| format %{ "fsqrts $dst, $src" %} |
| ins_encode %{ |
| __ fsqrts(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_div_s); |
| %} |
| |
| // ============================================================================ |
| // Logical Instructions |
| |
| // Integer Logical Instructions |
| |
| // And Instructions |
| |
| |
| instruct andI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2, rFlagsReg cr) %{ |
| match(Set dst (AndI src1 src2)); |
| |
| format %{ "andw $dst, $src1, $src2\t# int" %} |
| |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ andw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct andI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2, rFlagsReg cr) %{ |
| match(Set dst (AndI src1 src2)); |
| |
| format %{ "andsw $dst, $src1, $src2\t# int" %} |
| |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ andw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| (unsigned long)($src2$$constant)); |
| %} |
| |
| ins_pipe(ialu_reg_imm); |
| %} |
| |
| // Or Instructions |
| |
| instruct orI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (OrI src1 src2)); |
| |
| format %{ "orrw $dst, $src1, $src2\t# int" %} |
| |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ orrw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct orI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{ |
| match(Set dst (OrI src1 src2)); |
| |
| format %{ "orrw $dst, $src1, $src2\t# int" %} |
| |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ orrw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| (unsigned long)($src2$$constant)); |
| %} |
| |
| ins_pipe(ialu_reg_imm); |
| %} |
| |
| // Xor Instructions |
| |
| instruct xorI_reg_reg(iRegINoSp dst, iRegIorL2I src1, iRegIorL2I src2) %{ |
| match(Set dst (XorI src1 src2)); |
| |
| format %{ "eorw $dst, $src1, $src2\t# int" %} |
| |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ eorw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct xorI_reg_imm(iRegINoSp dst, iRegIorL2I src1, immILog src2) %{ |
| match(Set dst (XorI src1 src2)); |
| |
| format %{ "eorw $dst, $src1, $src2\t# int" %} |
| |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ eorw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| (unsigned long)($src2$$constant)); |
| %} |
| |
| ins_pipe(ialu_reg_imm); |
| %} |
| |
| // Long Logical Instructions |
| // TODO |
| |
| instruct andL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2, rFlagsReg cr) %{ |
| match(Set dst (AndL src1 src2)); |
| |
| format %{ "and $dst, $src1, $src2\t# int" %} |
| |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ andr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct andL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2, rFlagsReg cr) %{ |
| match(Set dst (AndL src1 src2)); |
| |
| format %{ "and $dst, $src1, $src2\t# int" %} |
| |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ andr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| (unsigned long)($src2$$constant)); |
| %} |
| |
| ins_pipe(ialu_reg_imm); |
| %} |
| |
| // Or Instructions |
| |
| instruct orL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{ |
| match(Set dst (OrL src1 src2)); |
| |
| format %{ "orr $dst, $src1, $src2\t# int" %} |
| |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ orr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct orL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{ |
| match(Set dst (OrL src1 src2)); |
| |
| format %{ "orr $dst, $src1, $src2\t# int" %} |
| |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ orr(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| (unsigned long)($src2$$constant)); |
| %} |
| |
| ins_pipe(ialu_reg_imm); |
| %} |
| |
| // Xor Instructions |
| |
| instruct xorL_reg_reg(iRegLNoSp dst, iRegL src1, iRegL src2) %{ |
| match(Set dst (XorL src1 src2)); |
| |
| format %{ "eor $dst, $src1, $src2\t# int" %} |
| |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ eor(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct xorL_reg_imm(iRegLNoSp dst, iRegL src1, immLLog src2) %{ |
| match(Set dst (XorL src1 src2)); |
| |
| ins_cost(INSN_COST); |
| format %{ "eor $dst, $src1, $src2\t# int" %} |
| |
| ins_encode %{ |
| __ eor(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| (unsigned long)($src2$$constant)); |
| %} |
| |
| ins_pipe(ialu_reg_imm); |
| %} |
| |
| instruct convI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src) |
| %{ |
| match(Set dst (ConvI2L src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "sxtw $dst, $src\t# i2l" %} |
| ins_encode %{ |
| __ sbfm($dst$$Register, $src$$Register, 0, 31); |
| %} |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // this pattern occurs in bignum (e.g. java.math.BigInteger) arithmetic |
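| // For example, the unsigned widening idiom |
| // |
| //   long l = x & 0xFFFFFFFFL;   // x is an int |
| // |
| // parses as (AndL (ConvI2L x) 0xFFFFFFFF) and matches here, costing a |
| // single ubfm (a zero extend) rather than a sign extend followed by an |
| // and. |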
| instruct convUI2L_reg_reg(iRegLNoSp dst, iRegIorL2I src, immL_32bits mask) |
| %{ |
| match(Set dst (AndL (ConvI2L src) mask)); |
| |
| ins_cost(INSN_COST); |
| format %{ "ubfm $dst, $src, 0, 31\t# ui2l" %} |
| ins_encode %{ |
| __ ubfm($dst$$Register, $src$$Register, 0, 31); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| instruct convL2I_reg(iRegINoSp dst, iRegL src) %{ |
| match(Set dst (ConvL2I src)); |
| |
| ins_cost(INSN_COST); |
| format %{ "movw $dst, $src \t// l2i" %} |
| |
| ins_encode %{ |
| __ movw(as_Register($dst$$reg), as_Register($src$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct convI2B(iRegINoSp dst, iRegIorL2I src, rFlagsReg cr) |
| %{ |
| match(Set dst (Conv2B src)); |
| effect(KILL cr); |
| |
| format %{ |
| "cmpw $src, zr\n\t" |
| "cset $dst, ne" |
| %} |
| |
| ins_encode %{ |
| __ cmpw(as_Register($src$$reg), zr); |
| __ cset(as_Register($dst$$reg), Assembler::NE); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct convP2B(iRegINoSp dst, iRegP src, rFlagsReg cr) |
| %{ |
| match(Set dst (Conv2B src)); |
| effect(KILL cr); |
| |
| format %{ |
| "cmp $src, zr\n\t" |
| "cset $dst, ne" |
| %} |
| |
| ins_encode %{ |
| __ cmp(as_Register($src$$reg), zr); |
| __ cset(as_Register($dst$$reg), Assembler::NE); |
| %} |
| |
| ins_pipe(ialu_reg); |
| %} |
| |
| instruct convD2F_reg(vRegF dst, vRegD src) %{ |
| match(Set dst (ConvD2F src)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "fcvtd $dst, $src \t// d2f" %} |
| |
| ins_encode %{ |
| __ fcvtd(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_d2f); |
| %} |
| |
| instruct convF2D_reg(vRegD dst, vRegF src) %{ |
| match(Set dst (ConvF2D src)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "fcvts $dst, $src \t// f2d" %} |
| |
| ins_encode %{ |
| __ fcvts(as_FloatRegister($dst$$reg), as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_f2d); |
| %} |
| |
| instruct convF2I_reg_reg(iRegINoSp dst, vRegF src) %{ |
| match(Set dst (ConvF2I src)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "fcvtzsw $dst, $src \t// f2i" %} |
| |
| ins_encode %{ |
| __ fcvtzsw(as_Register($dst$$reg), as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_f2i); |
| %} |
| |
| instruct convF2L_reg_reg(iRegLNoSp dst, vRegF src) %{ |
| match(Set dst (ConvF2L src)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "fcvtzs $dst, $src \t// f2l" %} |
| |
| ins_encode %{ |
| __ fcvtzs(as_Register($dst$$reg), as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_f2l); |
| %} |
| |
| instruct convI2F_reg_reg(vRegF dst, iRegIorL2I src) %{ |
| match(Set dst (ConvI2F src)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "scvtfws $dst, $src \t// i2f" %} |
| |
| ins_encode %{ |
| __ scvtfws(as_FloatRegister($dst$$reg), as_Register($src$$reg)); |
| %} |
| |
| ins_pipe(fp_i2f); |
| %} |
| |
| instruct convL2F_reg_reg(vRegF dst, iRegL src) %{ |
| match(Set dst (ConvL2F src)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "scvtfs $dst, $src \t// l2f" %} |
| |
| ins_encode %{ |
| __ scvtfs(as_FloatRegister($dst$$reg), as_Register($src$$reg)); |
| %} |
| |
| ins_pipe(fp_l2f); |
| %} |
| |
| instruct convD2I_reg_reg(iRegINoSp dst, vRegD src) %{ |
| match(Set dst (ConvD2I src)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "fcvtzdw $dst, $src \t// d2i" %} |
| |
| ins_encode %{ |
| __ fcvtzdw(as_Register($dst$$reg), as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_d2i); |
| %} |
| |
| instruct convD2L_reg_reg(iRegLNoSp dst, vRegD src) %{ |
| match(Set dst (ConvD2L src)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "fcvtzd $dst, $src \t// d2l" %} |
| |
| ins_encode %{ |
| __ fcvtzd(as_Register($dst$$reg), as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_d2l); |
| %} |
| |
| instruct convI2D_reg_reg(vRegD dst, iRegIorL2I src) %{ |
| match(Set dst (ConvI2D src)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "scvtfwd $dst, $src \t// i2d" %} |
| |
| ins_encode %{ |
| __ scvtfwd(as_FloatRegister($dst$$reg), as_Register($src$$reg)); |
| %} |
| |
| ins_pipe(fp_i2d); |
| %} |
| |
| instruct convL2D_reg_reg(vRegD dst, iRegL src) %{ |
| match(Set dst (ConvL2D src)); |
| |
| ins_cost(INSN_COST * 5); |
| format %{ "scvtfd $dst, $src \t// l2d" %} |
| |
| ins_encode %{ |
| __ scvtfd(as_FloatRegister($dst$$reg), as_Register($src$$reg)); |
| %} |
| |
| ins_pipe(fp_l2d); |
| %} |
| |
| // stack <-> reg and reg <-> reg shuffles with no conversion |
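| // |
| // These implement the raw bit-pattern intrinsics, e.g. |
| // |
| //   int bits   = Float.floatToRawIntBits(f);      // MoveF2I |
| //   double val = Double.longBitsToDouble(lbits);  // MoveL2D |
| // |
| // Only the 32- or 64-bit pattern is transferred; there is no value |
| // conversion or rounding. The stack variants cover operands that the |
| // register allocator has spilled. |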
| |
| instruct MoveF2I_stack_reg(iRegINoSp dst, stackSlotF src) %{ |
| |
| match(Set dst (MoveF2I src)); |
| |
| effect(DEF dst, USE src); |
| |
| ins_cost(4 * INSN_COST); |
| |
| format %{ "ldrw $dst, $src\t# MoveF2I_stack_reg" %} |
| |
| ins_encode %{ |
| __ ldrw($dst$$Register, Address(sp, $src$$disp)); |
| %} |
| |
| ins_pipe(iload_reg_reg); |
| |
| %} |
| |
| instruct MoveI2F_stack_reg(vRegF dst, stackSlotI src) %{ |
| |
| match(Set dst (MoveI2F src)); |
| |
| effect(DEF dst, USE src); |
| |
| ins_cost(4 * INSN_COST); |
| |
| format %{ "ldrs $dst, $src\t# MoveI2F_stack_reg" %} |
| |
| ins_encode %{ |
| __ ldrs(as_FloatRegister($dst$$reg), Address(sp, $src$$disp)); |
| %} |
| |
| ins_pipe(pipe_class_memory); |
| |
| %} |
| |
| instruct MoveD2L_stack_reg(iRegLNoSp dst, stackSlotD src) %{ |
| |
| match(Set dst (MoveD2L src)); |
| |
| effect(DEF dst, USE src); |
| |
| ins_cost(4 * INSN_COST); |
| |
| format %{ "ldr $dst, $src\t# MoveD2L_stack_reg" %} |
| |
| ins_encode %{ |
| __ ldr($dst$$Register, Address(sp, $src$$disp)); |
| %} |
| |
| ins_pipe(iload_reg_reg); |
| |
| %} |
| |
| instruct MoveL2D_stack_reg(vRegD dst, stackSlotL src) %{ |
| |
| match(Set dst (MoveL2D src)); |
| |
| effect(DEF dst, USE src); |
| |
| ins_cost(4 * INSN_COST); |
| |
| format %{ "ldrd $dst, $src\t# MoveL2D_stack_reg" %} |
| |
| ins_encode %{ |
| __ ldrd(as_FloatRegister($dst$$reg), Address(sp, $src$$disp)); |
| %} |
| |
| ins_pipe(pipe_class_memory); |
| |
| %} |
| |
| instruct MoveF2I_reg_stack(stackSlotI dst, vRegF src) %{ |
| |
| match(Set dst (MoveF2I src)); |
| |
| effect(DEF dst, USE src); |
| |
| ins_cost(INSN_COST); |
| |
| format %{ "strs $src, $dst\t# MoveF2I_reg_stack" %} |
| |
| ins_encode %{ |
| __ strs(as_FloatRegister($src$$reg), Address(sp, $dst$$disp)); |
| %} |
| |
| ins_pipe(pipe_class_memory); |
| |
| %} |
| |
| instruct MoveI2F_reg_stack(stackSlotF dst, iRegI src) %{ |
| |
| match(Set dst (MoveI2F src)); |
| |
| effect(DEF dst, USE src); |
| |
| ins_cost(INSN_COST); |
| |
| format %{ "strw $src, $dst\t# MoveI2F_reg_stack" %} |
| |
| ins_encode %{ |
| __ strw($src$$Register, Address(sp, $dst$$disp)); |
| %} |
| |
| ins_pipe(istore_reg_reg); |
| |
| %} |
| |
| instruct MoveD2L_reg_stack(stackSlotL dst, vRegD src) %{ |
| |
| match(Set dst (MoveD2L src)); |
| |
| effect(DEF dst, USE src); |
| |
| ins_cost(INSN_COST); |
| |
| format %{ "strd $dst, $src\t# MoveD2L_reg_stack" %} |
| |
| ins_encode %{ |
| __ strd(as_FloatRegister($src$$reg), Address(sp, $dst$$disp)); |
| %} |
| |
| ins_pipe(pipe_class_memory); |
| |
| %} |
| |
| instruct MoveL2D_reg_stack(stackSlotD dst, iRegL src) %{ |
| |
| match(Set dst (MoveL2D src)); |
| |
| effect(DEF dst, USE src); |
| |
| ins_cost(INSN_COST); |
| |
| format %{ "str $src, $dst\t# MoveL2D_reg_stack" %} |
| |
| ins_encode %{ |
| __ str($src$$Register, Address(sp, $dst$$disp)); |
| %} |
| |
| ins_pipe(istore_reg_reg); |
| |
| %} |
| |
| instruct MoveF2I_reg_reg(iRegINoSp dst, vRegF src) %{ |
| |
| match(Set dst (MoveF2I src)); |
| |
| effect(DEF dst, USE src); |
| |
| ins_cost(INSN_COST); |
| |
| format %{ "fmovs $dst, $src\t# MoveF2I_reg_reg" %} |
| |
| ins_encode %{ |
| __ fmovs($dst$$Register, as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_f2i); |
| |
| %} |
| |
| instruct MoveI2F_reg_reg(vRegF dst, iRegI src) %{ |
| |
| match(Set dst (MoveI2F src)); |
| |
| effect(DEF dst, USE src); |
| |
| ins_cost(INSN_COST); |
| |
| format %{ "fmovs $dst, $src\t# MoveI2F_reg_reg" %} |
| |
| ins_encode %{ |
| __ fmovs(as_FloatRegister($dst$$reg), $src$$Register); |
| %} |
| |
| ins_pipe(fp_i2f); |
| |
| %} |
| |
| instruct MoveD2L_reg_reg(iRegLNoSp dst, vRegD src) %{ |
| |
| match(Set dst (MoveD2L src)); |
| |
| effect(DEF dst, USE src); |
| |
| ins_cost(INSN_COST); |
| |
| format %{ "fmovd $dst, $src\t# MoveD2L_reg_reg" %} |
| |
| ins_encode %{ |
| __ fmovd($dst$$Register, as_FloatRegister($src$$reg)); |
| %} |
| |
| ins_pipe(fp_d2l); |
| |
| %} |
| |
| instruct MoveL2D_reg_reg(vRegD dst, iRegL src) %{ |
| |
| match(Set dst (MoveL2D src)); |
| |
| effect(DEF dst, USE src); |
| |
| ins_cost(INSN_COST); |
| |
| format %{ "fmovd $dst, $src\t# MoveL2D_reg_reg" %} |
| |
| ins_encode %{ |
| __ fmovd(as_FloatRegister($dst$$reg), $src$$Register); |
| %} |
| |
| ins_pipe(fp_l2d); |
| |
| %} |
| |
| // ============================================================================ |
| // clearing of an array |
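| // |
| // ClearArray zeroes $cnt HeapWords (not bytes) starting at $base; it |
| // appears in the initialization step of array allocation. zero_words |
| // chooses its strategy (a store-pair loop, or block zeroing via dc zva |
| // where that is enabled) based on the count and the CPU features. |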
| |
| instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr) |
| %{ |
| match(Set dummy (ClearArray cnt base)); |
| effect(USE_KILL cnt, USE_KILL base); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ClearArray $cnt, $base" %} |
| |
| ins_encode %{ |
| __ zero_words($base$$Register, $cnt$$Register); |
| %} |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, Universe dummy, rFlagsReg cr) |
| %{ |
| predicate((u_int64_t)n->in(2)->get_long() |
| < (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord)); |
| match(Set dummy (ClearArray cnt base)); |
| effect(USE_KILL base); |
| |
| ins_cost(4 * INSN_COST); |
| format %{ "ClearArray $cnt, $base" %} |
| |
| ins_encode %{ |
| __ zero_words($base$$Register, (u_int64_t)$cnt$$constant); |
| %} |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // ============================================================================ |
| // Overflow Math Instructions |
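| // |
| // These nodes come from the Math.*Exact intrinsics. The instruction |
| // only sets the flags; the consumer then branches on the V flag. For |
| // Math.addExact(int, int) the expected shape is, roughly, |
| // |
| //   cmnw w0, w1      // i.e. adds wzr, w0, w1; sets V on signed overflow |
| //   b.vs slow_path   // deoptimize and throw ArithmeticException |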
| |
| instruct overflowAddI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2) |
| %{ |
| match(Set cr (OverflowAddI op1 op2)); |
| |
| format %{ "cmnw $op1, $op2\t# overflow check int" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ cmnw($op1$$Register, $op2$$Register); |
| %} |
| |
| ins_pipe(icmp_reg_reg); |
| %} |
| |
| instruct overflowAddI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2) |
| %{ |
| match(Set cr (OverflowAddI op1 op2)); |
| |
| format %{ "cmnw $op1, $op2\t# overflow check int" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ cmnw($op1$$Register, $op2$$constant); |
| %} |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct overflowAddL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2) |
| %{ |
| match(Set cr (OverflowAddL op1 op2)); |
| |
| format %{ "cmn $op1, $op2\t# overflow check long" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ cmn($op1$$Register, $op2$$Register); |
| %} |
| |
| ins_pipe(icmp_reg_reg); |
| %} |
| |
| instruct overflowAddL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2) |
| %{ |
| match(Set cr (OverflowAddL op1 op2)); |
| |
| format %{ "cmn $op1, $op2\t# overflow check long" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ cmn($op1$$Register, $op2$$constant); |
| %} |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct overflowSubI_reg_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2) |
| %{ |
| match(Set cr (OverflowSubI op1 op2)); |
| |
| format %{ "cmpw $op1, $op2\t# overflow check int" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ cmpw($op1$$Register, $op2$$Register); |
| %} |
| |
| ins_pipe(icmp_reg_reg); |
| %} |
| |
| instruct overflowSubI_reg_imm(rFlagsReg cr, iRegIorL2I op1, immIAddSub op2) |
| %{ |
| match(Set cr (OverflowSubI op1 op2)); |
| |
| format %{ "cmpw $op1, $op2\t# overflow check int" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ cmpw($op1$$Register, $op2$$constant); |
| %} |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct overflowSubL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2) |
| %{ |
| match(Set cr (OverflowSubL op1 op2)); |
| |
| format %{ "cmp $op1, $op2\t# overflow check long" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ cmp($op1$$Register, $op2$$Register); |
| %} |
| |
| ins_pipe(icmp_reg_reg); |
| %} |
| |
| instruct overflowSubL_reg_imm(rFlagsReg cr, iRegL op1, immLAddSub op2) |
| %{ |
| match(Set cr (OverflowSubL op1 op2)); |
| |
| format %{ "cmp $op1, $op2\t# overflow check long" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ cmp($op1$$Register, $op2$$constant); |
| %} |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct overflowNegI_reg(rFlagsReg cr, immI0 zero, iRegIorL2I op1) |
| %{ |
| match(Set cr (OverflowSubI zero op1)); |
| |
| format %{ "cmpw zr, $op1\t# overflow check int" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ cmpw(zr, $op1$$Register); |
| %} |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct overflowNegL_reg(rFlagsReg cr, immI0 zero, iRegL op1) |
| %{ |
| match(Set cr (OverflowSubL zero op1)); |
| |
| format %{ "cmp zr, $op1\t# overflow check long" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ cmp(zr, $op1$$Register); |
| %} |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct overflowMulI_reg(rFlagsReg cr, iRegIorL2I op1, iRegIorL2I op2) |
| %{ |
| match(Set cr (OverflowMulI op1 op2)); |
| |
| format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t" |
| "cmp rscratch1, rscratch1, sxtw\n\t" |
| "movw rscratch1, #0x80000000\n\t" |
| "cselw rscratch1, rscratch1, zr, NE\n\t" |
| "cmpw rscratch1, #1" %} |
| ins_cost(5 * INSN_COST); |
| ins_encode %{ |
| __ smull(rscratch1, $op1$$Register, $op2$$Register); |
| __ subs(zr, rscratch1, rscratch1, ext::sxtw); // NE => overflow |
| __ movw(rscratch1, 0x80000000); // Develop 0 (EQ), |
| __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE) |
| __ cmpw(rscratch1, 1); // 0x80000000 - 1 => VS |
| %} |
| |
| ins_pipe(pipe_slow); |
| %} |
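| |
| // The csel/cmpw tail above exists because the widening compare leaves |
| // the overflow outcome in NE/EQ, while the generic cmpOp consumer |
| // tests V. Subtracting 1 from 0x80000000 (INT_MIN) is itself a signed |
| // overflow, so the final cmpw turns NE into VS and EQ into VC. The |
| // long variant below plays the same trick after mul/smulh. |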
| |
| instruct overflowMulI_reg_branch(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, label labl, rFlagsReg cr) |
| %{ |
| match(If cmp (OverflowMulI op1 op2)); |
| predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow |
| || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow); |
| effect(USE labl, KILL cr); |
| |
| format %{ "smull rscratch1, $op1, $op2\t# overflow check int\n\t" |
| "cmp rscratch1, rscratch1, sxtw\n\t" |
| "b$cmp $labl" %} |
| ins_cost(3 * INSN_COST); // Branch is rare so treat as INSN_COST |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| __ smull(rscratch1, $op1$$Register, $op2$$Register); |
| __ subs(zr, rscratch1, rscratch1, ext::sxtw); // NE => overflow |
| __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L); |
| %} |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct overflowMulL_reg(rFlagsReg cr, iRegL op1, iRegL op2) |
| %{ |
| match(Set cr (OverflowMulL op1 op2)); |
| |
| format %{ "mul rscratch1, $op1, $op2\t#overflow check long\n\t" |
| "smulh rscratch2, $op1, $op2\n\t" |
| "cmp rscratch2, rscratch1, ASR #63\n\t" |
| "movw rscratch1, #0x80000000\n\t" |
| "cselw rscratch1, rscratch1, zr, NE\n\t" |
| "cmpw rscratch1, #1" %} |
| ins_cost(6 * INSN_COST); |
| ins_encode %{ |
| __ mul(rscratch1, $op1$$Register, $op2$$Register); // Result bits 0..63 |
| __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127 |
| __ cmp(rscratch2, rscratch1, Assembler::ASR, 63); // Top is pure sign ext |
| __ movw(rscratch1, 0x80000000); // Develop 0 (EQ), |
| __ cselw(rscratch1, rscratch1, zr, Assembler::NE); // or 0x80000000 (NE) |
| __ cmpw(rscratch1, 1); // 0x80000000 - 1 => VS |
| %} |
| |
| ins_pipe(pipe_slow); |
| %} |
| |
| instruct overflowMulL_reg_branch(cmpOp cmp, iRegL op1, iRegL op2, label labl, rFlagsReg cr) |
| %{ |
| match(If cmp (OverflowMulL op1 op2)); |
| predicate(n->in(1)->as_Bool()->_test._test == BoolTest::overflow |
| || n->in(1)->as_Bool()->_test._test == BoolTest::no_overflow); |
| effect(USE labl, KILL cr); |
| |
| format %{ "mul rscratch1, $op1, $op2\t#overflow check long\n\t" |
| "smulh rscratch2, $op1, $op2\n\t" |
| "cmp rscratch2, rscratch1, ASR #63\n\t" |
| "b$cmp $labl" %} |
| ins_cost(4 * INSN_COST); // Branch is rare so treat as INSN_COST |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| __ mul(rscratch1, $op1$$Register, $op2$$Register); // Result bits 0..63 |
| __ smulh(rscratch2, $op1$$Register, $op2$$Register); // Result bits 64..127 |
| __ cmp(rscratch2, rscratch1, Assembler::ASR, 63); // Top is pure sign ext |
| __ br(cond == Assembler::VS ? Assembler::NE : Assembler::EQ, *L); |
| %} |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| // ============================================================================ |
| // Compare Instructions |
| |
| instruct compI_reg_reg(rFlagsReg cr, iRegI op1, iRegI op2) |
| %{ |
| match(Set cr (CmpI op1 op2)); |
| |
| effect(DEF cr, USE op1, USE op2); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmpw $op1, $op2" %} |
| |
| ins_encode(aarch64_enc_cmpw(op1, op2)); |
| |
| ins_pipe(icmp_reg_reg); |
| %} |
| |
| instruct compI_reg_immI0(rFlagsReg cr, iRegI op1, immI0 zero) |
| %{ |
| match(Set cr (CmpI op1 zero)); |
| |
| effect(DEF cr, USE op1); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmpw $op1, 0" %} |
| |
| ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct compI_reg_immIAddSub(rFlagsReg cr, iRegI op1, immIAddSub op2) |
| %{ |
| match(Set cr (CmpI op1 op2)); |
| |
| effect(DEF cr, USE op1); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmpw $op1, $op2" %} |
| |
| ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct compI_reg_immI(rFlagsReg cr, iRegI op1, immI op2) |
| %{ |
| match(Set cr (CmpI op1 op2)); |
| |
| effect(DEF cr, USE op1); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cmpw $op1, $op2" %} |
| |
| ins_encode(aarch64_enc_cmpw_imm(op1, op2)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| // Unsigned compare instructions: the same as the signed compares |
| // above, except that the result should only be used to feed an If or |
| // a CMovI which takes a cmpOpU. |
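| // |
| // For example, a bounds check such as |
| // |
| //   if (0 <= i && i < a.length) { ... } |
| // |
| // is folded by C2 into a single unsigned test, i u< a.length, which |
| // arrives here as a CmpU. The compare emits an ordinary cmpw; only the |
| // consuming branch differs, using LO/HS in place of LT/GE. |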
| |
| instruct compU_reg_reg(rFlagsRegU cr, iRegI op1, iRegI op2) |
| %{ |
| match(Set cr (CmpU op1 op2)); |
| |
| effect(DEF cr, USE op1, USE op2); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmpw $op1, $op2\t# unsigned" %} |
| |
| ins_encode(aarch64_enc_cmpw(op1, op2)); |
| |
| ins_pipe(icmp_reg_reg); |
| %} |
| |
| instruct compU_reg_immI0(rFlagsRegU cr, iRegI op1, immI0 zero) |
| %{ |
| match(Set cr (CmpU op1 zero)); |
| |
| effect(DEF cr, USE op1); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmpw $op1, #0\t# unsigned" %} |
| |
| ins_encode(aarch64_enc_cmpw_imm_addsub(op1, zero)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct compU_reg_immIAddSub(rFlagsRegU cr, iRegI op1, immIAddSub op2) |
| %{ |
| match(Set cr (CmpU op1 op2)); |
| |
| effect(DEF cr, USE op1); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmpw $op1, $op2\t# unsigned" %} |
| |
| ins_encode(aarch64_enc_cmpw_imm_addsub(op1, op2)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct compU_reg_immI(rFlagsRegU cr, iRegI op1, immI op2) |
| %{ |
| match(Set cr (CmpU op1 op2)); |
| |
| effect(DEF cr, USE op1); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cmpw $op1, $op2\t# unsigned" %} |
| |
| ins_encode(aarch64_enc_cmpw_imm(op1, op2)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct compL_reg_reg(rFlagsReg cr, iRegL op1, iRegL op2) |
| %{ |
| match(Set cr (CmpL op1 op2)); |
| |
| effect(DEF cr, USE op1, USE op2); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmp $op1, $op2" %} |
| |
| ins_encode(aarch64_enc_cmp(op1, op2)); |
| |
| ins_pipe(icmp_reg_reg); |
| %} |
| |
| instruct compL_reg_immL0(rFlagsReg cr, iRegL op1, immL0 zero) |
| %{ |
| match(Set cr (CmpL op1 zero)); |
| |
| effect(DEF cr, USE op1); |
| |
| ins_cost(INSN_COST); |
| format %{ "tst $op1" %} |
| |
| ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct compL_reg_immLAddSub(rFlagsReg cr, iRegL op1, immLAddSub op2) |
| %{ |
| match(Set cr (CmpL op1 op2)); |
| |
| effect(DEF cr, USE op1); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmp $op1, $op2" %} |
| |
| ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct compL_reg_immL(rFlagsReg cr, iRegL op1, immL op2) |
| %{ |
| match(Set cr (CmpL op1 op2)); |
| |
| effect(DEF cr, USE op1); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cmp $op1, $op2" %} |
| |
| ins_encode(aarch64_enc_cmp_imm(op1, op2)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct compUL_reg_reg(rFlagsRegU cr, iRegL op1, iRegL op2) |
| %{ |
| match(Set cr (CmpUL op1 op2)); |
| |
| effect(DEF cr, USE op1, USE op2); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmp $op1, $op2" %} |
| |
| ins_encode(aarch64_enc_cmp(op1, op2)); |
| |
| ins_pipe(icmp_reg_reg); |
| %} |
| |
| instruct compUL_reg_immL0(rFlagsRegU cr, iRegL op1, immL0 zero) |
| %{ |
| match(Set cr (CmpUL op1 zero)); |
| |
| effect(DEF cr, USE op1); |
| |
| ins_cost(INSN_COST); |
| format %{ "tst $op1" %} |
| |
| ins_encode(aarch64_enc_cmp_imm_addsub(op1, zero)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct compUL_reg_immLAddSub(rFlagsRegU cr, iRegL op1, immLAddSub op2) |
| %{ |
| match(Set cr (CmpUL op1 op2)); |
| |
| effect(DEF cr, USE op1); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmp $op1, $op2" %} |
| |
| ins_encode(aarch64_enc_cmp_imm_addsub(op1, op2)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct compUL_reg_immL(rFlagsRegU cr, iRegL op1, immL op2) |
| %{ |
| match(Set cr (CmpUL op1 op2)); |
| |
| effect(DEF cr, USE op1); |
| |
| ins_cost(INSN_COST * 2); |
| format %{ "cmp $op1, $op2" %} |
| |
| ins_encode(aarch64_enc_cmp_imm(op1, op2)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct compP_reg_reg(rFlagsRegU cr, iRegP op1, iRegP op2) |
| %{ |
| match(Set cr (CmpP op1 op2)); |
| |
| effect(DEF cr, USE op1, USE op2); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmp $op1, $op2\t // ptr" %} |
| |
| ins_encode(aarch64_enc_cmpp(op1, op2)); |
| |
| ins_pipe(icmp_reg_reg); |
| %} |
| |
| instruct compN_reg_reg(rFlagsRegU cr, iRegN op1, iRegN op2) |
| %{ |
| match(Set cr (CmpN op1 op2)); |
| |
| effect(DEF cr, USE op1, USE op2); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmp $op1, $op2\t // compressed ptr" %} |
| |
| ins_encode(aarch64_enc_cmpn(op1, op2)); |
| |
| ins_pipe(icmp_reg_reg); |
| %} |
| |
| instruct testP_reg(rFlagsRegU cr, iRegP op1, immP0 zero) |
| %{ |
| match(Set cr (CmpP op1 zero)); |
| |
| effect(DEF cr, USE op1, USE zero); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmp $op1, 0\t // ptr" %} |
| |
| ins_encode(aarch64_enc_testp(op1)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| instruct testN_reg(rFlagsRegU cr, iRegN op1, immN0 zero) |
| %{ |
| match(Set cr (CmpN op1 zero)); |
| |
| effect(DEF cr, USE op1, USE zero); |
| |
| ins_cost(INSN_COST); |
| format %{ "cmp $op1, 0\t // compressed ptr" %} |
| |
| ins_encode(aarch64_enc_testn(op1)); |
| |
| ins_pipe(icmp_reg_imm); |
| %} |
| |
| // FP comparisons |
| // |
| // n.b. CmpF/CmpD set a normal flags reg which then gets compared |
| // using normal cmpOp. See declaration of rFlagsReg for details. |
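| // |
| // When either input is NaN the comparison is unordered and fcmp sets |
| // C and V (NZCV = 0011). So in a sequence such as |
| // |
| //   fcmps s0, s1 |
| //   b.mi  is_less    // N set: taken only for an ordered less-than |
| // |
| // the NaN case falls through, matching the Java rule that any |
| // comparison involving NaN is false. |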
| |
| instruct compF_reg_reg(rFlagsReg cr, vRegF src1, vRegF src2) |
| %{ |
| match(Set cr (CmpF src1 src2)); |
| |
| ins_cost(3 * INSN_COST); |
| format %{ "fcmps $src1, $src2" %} |
| |
| ins_encode %{ |
| __ fcmps(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg)); |
| %} |
| |
| ins_pipe(pipe_class_compare); |
| %} |
| |
| instruct compF_reg_zero(rFlagsReg cr, vRegF src1, immF0 src2) |
| %{ |
| match(Set cr (CmpF src1 src2)); |
| |
| ins_cost(3 * INSN_COST); |
| format %{ "fcmps $src1, 0.0" %} |
| |
| ins_encode %{ |
| __ fcmps(as_FloatRegister($src1$$reg), 0.0); |
| %} |
| |
| ins_pipe(pipe_class_compare); |
| %} |
| |
| instruct compD_reg_reg(rFlagsReg cr, vRegD src1, vRegD src2) |
| %{ |
| match(Set cr (CmpD src1 src2)); |
| |
| ins_cost(3 * INSN_COST); |
| format %{ "fcmpd $src1, $src2" %} |
| |
| ins_encode %{ |
| __ fcmpd(as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg)); |
| %} |
| |
| ins_pipe(pipe_class_compare); |
| %} |
| |
| instruct compD_reg_zero(rFlagsReg cr, vRegD src1, immD0 src2) |
| %{ |
| match(Set cr (CmpD src1 src2)); |
| |
| ins_cost(3 * INSN_COST); |
| format %{ "fcmpd $src1, 0.0" %} |
| |
| ins_encode %{ |
| __ fcmpd(as_FloatRegister($src1$$reg), 0.0); |
| %} |
| |
| ins_pipe(pipe_class_compare); |
| %} |
| |
| instruct compF3_reg_reg(iRegINoSp dst, vRegF src1, vRegF src2, rFlagsReg cr) |
| %{ |
| match(Set dst (CmpF3 src1 src2)); |
| effect(KILL cr); |
| |
| ins_cost(5 * INSN_COST); |
| format %{ "fcmps $src1, $src2\n\t" |
| "csinvw($dst, zr, zr, eq\n\t" |
| "csnegw($dst, $dst, $dst, lt)" |
| %} |
| |
| ins_encode %{ |
| FloatRegister s1 = as_FloatRegister($src1$$reg); |
| FloatRegister s2 = as_FloatRegister($src2$$reg); |
| Register d = as_Register($dst$$reg); |
| __ fcmps(s1, s2); |
| // installs 0 if EQ else -1 |
| __ csinvw(d, zr, zr, Assembler::EQ); |
| // keeps -1 if less or unordered else installs 1 |
| __ csnegw(d, d, d, Assembler::LT); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| |
| %} |
| |
| instruct compD3_reg_reg(iRegINoSp dst, vRegD src1, vRegD src2, rFlagsReg cr) |
| %{ |
| match(Set dst (CmpD3 src1 src2)); |
| effect(KILL cr); |
| |
| ins_cost(5 * INSN_COST); |
| format %{ "fcmpd $src1, $src2\n\t" |
| "csinvw($dst, zr, zr, eq\n\t" |
| "csnegw($dst, $dst, $dst, lt)" |
| %} |
| |
| ins_encode %{ |
| FloatRegister s1 = as_FloatRegister($src1$$reg); |
| FloatRegister s2 = as_FloatRegister($src2$$reg); |
| Register d = as_Register($dst$$reg); |
| __ fcmpd(s1, s2); |
| // installs 0 if EQ else -1 |
| __ csinvw(d, zr, zr, Assembler::EQ); |
| // keeps -1 if less or unordered else installs 1 |
| __ csnegw(d, d, d, Assembler::LT); |
| %} |
| ins_pipe(pipe_class_default); |
| |
| %} |
| |
| instruct compF3_reg_immF0(iRegINoSp dst, vRegF src1, immF0 zero, rFlagsReg cr) |
| %{ |
| match(Set dst (CmpF3 src1 zero)); |
| effect(KILL cr); |
| |
| ins_cost(5 * INSN_COST); |
| format %{ "fcmps $src1, 0.0\n\t" |
| "csinvw($dst, zr, zr, eq\n\t" |
| "csnegw($dst, $dst, $dst, lt)" |
| %} |
| |
| ins_encode %{ |
| FloatRegister s1 = as_FloatRegister($src1$$reg); |
| Register d = as_Register($dst$$reg); |
| __ fcmps(s1, 0.0); |
| // installs 0 if EQ else -1 |
| __ csinvw(d, zr, zr, Assembler::EQ); |
| // keeps -1 if less or unordered else installs 1 |
| __ csnegw(d, d, d, Assembler::LT); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| |
| %} |
| |
| instruct compD3_reg_immD0(iRegINoSp dst, vRegD src1, immD0 zero, rFlagsReg cr) |
| %{ |
| match(Set dst (CmpD3 src1 zero)); |
| effect(KILL cr); |
| |
| ins_cost(5 * INSN_COST); |
| format %{ "fcmpd $src1, 0.0\n\t" |
| "csinvw($dst, zr, zr, eq\n\t" |
| "csnegw($dst, $dst, $dst, lt)" |
| %} |
| |
| ins_encode %{ |
| FloatRegister s1 = as_FloatRegister($src1$$reg); |
| Register d = as_Register($dst$$reg); |
| __ fcmpd(s1, 0.0); |
| // installs 0 if EQ else -1 |
| __ csinvw(d, zr, zr, Assembler::EQ); |
| // keeps -1 if less or unordered else installs 1 |
| __ csnegw(d, d, d, Assembler::LT); |
| %} |
| ins_pipe(pipe_class_default); |
| |
| %} |
| |
| instruct cmpLTMask_reg_reg(iRegINoSp dst, iRegIorL2I p, iRegIorL2I q, rFlagsReg cr) |
| %{ |
| match(Set dst (CmpLTMask p q)); |
| effect(KILL cr); |
| |
| ins_cost(3 * INSN_COST); |
| |
| format %{ "cmpw $p, $q\t# cmpLTMask\n\t" |
| "csetw $dst, lt\n\t" |
| "subw $dst, zr, $dst" |
| %} |
| |
| ins_encode %{ |
| __ cmpw(as_Register($p$$reg), as_Register($q$$reg)); |
| __ csetw(as_Register($dst$$reg), Assembler::LT); |
| __ subw(as_Register($dst$$reg), zr, as_Register($dst$$reg)); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct cmpLTMask_reg_zero(iRegINoSp dst, iRegIorL2I src, immI0 zero, rFlagsReg cr) |
| %{ |
| match(Set dst (CmpLTMask src zero)); |
| effect(KILL cr); |
| |
| ins_cost(INSN_COST); |
| |
| format %{ "asrw $dst, $src, #31\t# cmpLTMask0" %} |
| |
| ins_encode %{ |
| __ asrw(as_Register($dst$$reg), as_Register($src$$reg), 31); |
| %} |
| |
| ins_pipe(ialu_reg_shift); |
| %} |
| |
| // ============================================================================ |
| // Max and Min |
| |
| instruct minI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr) |
| %{ |
| match(Set dst (MinI src1 src2)); |
| |
| effect(DEF dst, USE src1, USE src2, KILL cr); |
| size(8); |
| |
| ins_cost(INSN_COST * 3); |
| format %{ |
| "cmpw $src1 $src2\t signed int\n\t" |
| "cselw $dst, $src1, $src2 lt\t" |
| %} |
| |
| ins_encode %{ |
| __ cmpw(as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| __ cselw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::LT); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct maxI_rReg(iRegINoSp dst, iRegI src1, iRegI src2, rFlagsReg cr) |
| %{ |
| match(Set dst (MaxI src1 src2)); |
| |
| effect(DEF dst, USE src1, USE src2, KILL cr); |
| size(8); |
| |
| ins_cost(INSN_COST * 3); |
| format %{ |
| "cmpw $src1 $src2\t signed int\n\t" |
| "cselw $dst, $src1, $src2 gt\t" |
| %} |
| |
| ins_encode %{ |
| __ cmpw(as_Register($src1$$reg), |
| as_Register($src2$$reg)); |
| __ cselw(as_Register($dst$$reg), |
| as_Register($src1$$reg), |
| as_Register($src2$$reg), |
| Assembler::GT); |
| %} |
| |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| // ============================================================================ |
| // Branch Instructions |
| |
| // Direct Branch. |
| instruct branch(label lbl) |
| %{ |
| match(Goto); |
| |
| effect(USE lbl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "b $lbl" %} |
| |
| ins_encode(aarch64_enc_b(lbl)); |
| |
| ins_pipe(pipe_branch); |
| %} |
| |
| // Conditional Near Branch |
| instruct branchCon(cmpOp cmp, rFlagsReg cr, label lbl) |
| %{ |
| // Same match rule as `branchConFar'. |
| match(If cmp cr); |
| |
| effect(USE lbl); |
| |
| ins_cost(BRANCH_COST); |
| // If set to 1 this indicates that the current instruction is a |
| // short variant of a long branch. This avoids using this |
| // instruction in first-pass matching. It will then only be used in |
| // the `Shorten_branches' pass. |
| // ins_short_branch(1); |
| format %{ "b$cmp $lbl" %} |
| |
| ins_encode(aarch64_enc_br_con(cmp, lbl)); |
| |
| ins_pipe(pipe_branch_cond); |
| %} |
| |
| // Conditional Near Branch Unsigned |
| instruct branchConU(cmpOpU cmp, rFlagsRegU cr, label lbl) |
| %{ |
| // Same match rule as `branchConFar'. |
| match(If cmp cr); |
| |
| effect(USE lbl); |
| |
| ins_cost(BRANCH_COST); |
| // If set to 1 this indicates that the current instruction is a |
| // short variant of a long branch. This avoids using this |
| // instruction in first-pass matching. It will then only be used in |
| // the `Shorten_branches' pass. |
| // ins_short_branch(1); |
| format %{ "b$cmp $lbl\t# unsigned" %} |
| |
| ins_encode(aarch64_enc_br_conU(cmp, lbl)); |
| |
| ins_pipe(pipe_branch_cond); |
| %} |
| |
| // Make use of CBZ and CBNZ. These instructions, as well as being |
| // shorter than (cmp; branch), have the additional benefit of not |
| // killing the flags. |
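| // |
| // For example, instead of |
| // |
| //   cmpw w0, #0 |
| //   b.eq L |
| // |
| // we emit the single instruction |
| // |
| //   cbz  w0, L |
| // |
| // which also leaves NZCV available to any neighbouring flag user. |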
| |
| instruct cmpI_imm0_branch(cmpOpEqNe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsReg cr) %{ |
| match(If cmp (CmpI op1 op2)); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "cbw$cmp $op1, $labl" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| if (cond == Assembler::EQ) |
| __ cbzw($op1$$Register, *L); |
| else |
| __ cbnzw($op1$$Register, *L); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| %} |
| |
| instruct cmpL_imm0_branch(cmpOpEqNe cmp, iRegL op1, immL0 op2, label labl, rFlagsReg cr) %{ |
| match(If cmp (CmpL op1 op2)); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "cb$cmp $op1, $labl" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| if (cond == Assembler::EQ) |
| __ cbz($op1$$Register, *L); |
| else |
| __ cbnz($op1$$Register, *L); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| %} |
| |
| instruct cmpP_imm0_branch(cmpOpEqNe cmp, iRegP op1, immP0 op2, label labl, rFlagsReg cr) %{ |
| match(If cmp (CmpP op1 op2)); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "cb$cmp $op1, $labl" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| if (cond == Assembler::EQ) |
| __ cbz($op1$$Register, *L); |
| else |
| __ cbnz($op1$$Register, *L); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| %} |
| |
| instruct cmpN_imm0_branch(cmpOpEqNe cmp, iRegN op1, immN0 op2, label labl, rFlagsReg cr) %{ |
| match(If cmp (CmpN op1 op2)); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "cbw$cmp $op1, $labl" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| if (cond == Assembler::EQ) |
| __ cbzw($op1$$Register, *L); |
| else |
| __ cbnzw($op1$$Register, *L); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| %} |
| |
| instruct cmpP_narrowOop_imm0_branch(cmpOpEqNe cmp, iRegN oop, immP0 zero, label labl, rFlagsReg cr) %{ |
| match(If cmp (CmpP (DecodeN oop) zero)); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "cb$cmp $oop, $labl" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| if (cond == Assembler::EQ) |
| __ cbzw($oop$$Register, *L); |
| else |
| __ cbnzw($oop$$Register, *L); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| %} |
| |
| instruct cmpUI_imm0_branch(cmpOpUEqNeLtGe cmp, iRegIorL2I op1, immI0 op2, label labl, rFlagsRegU cr) %{ |
| match(If cmp (CmpU op1 op2)); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "cbw$cmp $op1, $labl" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| if (cond == Assembler::EQ || cond == Assembler::LS) |
| __ cbzw($op1$$Register, *L); |
| else |
| __ cbnzw($op1$$Register, *L); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| %} |
| |
| instruct cmpUL_imm0_branch(cmpOpUEqNeLtGe cmp, iRegL op1, immL0 op2, label labl, rFlagsRegU cr) %{ |
| match(If cmp (CmpUL op1 op2)); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "cb$cmp $op1, $labl" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| if (cond == Assembler::EQ || cond == Assembler::LS) |
| __ cbz($op1$$Register, *L); |
| else |
| __ cbnz($op1$$Register, *L); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| %} |
| |
| // Test bit and Branch |
| |
| // Patterns for short (< 32KiB) variants |
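| // |
| // For example, a sign test on a long such as |
| // |
| //   if (x < 0) { ... } |
| // |
| // matches cmpL_branch_sign below and becomes the single instruction |
| // |
| //   tbnz x0, #63, L |
| // |
| // testing the sign bit directly rather than comparing against zero. |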
| instruct cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{ |
| match(If cmp (CmpL op1 op2)); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "cb$cmp $op1, $labl # long" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = |
| ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ; |
| __ tbr(cond, $op1$$Register, 63, *L); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| ins_short_branch(1); |
| %} |
| |
| instruct cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{ |
| match(If cmp (CmpI op1 op2)); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "cb$cmp $op1, $labl # int" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = |
| ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ; |
| __ tbr(cond, $op1$$Register, 31, *L); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| ins_short_branch(1); |
| %} |
| |
| instruct cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{ |
| match(If cmp (CmpL (AndL op1 op2) op3)); |
| predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long())); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "tb$cmp $op1, $op2, $labl" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| int bit = exact_log2($op2$$constant); |
| __ tbr(cond, $op1$$Register, bit, *L); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| ins_short_branch(1); |
| %} |
| |
| instruct cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{ |
| match(If cmp (CmpI (AndI op1 op2) op3)); |
| predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int())); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "tb$cmp $op1, $op2, $labl" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| int bit = exact_log2($op2$$constant); |
| __ tbr(cond, $op1$$Register, bit, *L); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| ins_short_branch(1); |
| %} |
| |
| // And far variants |
| instruct far_cmpL_branch_sign(cmpOpLtGe cmp, iRegL op1, immL0 op2, label labl) %{ |
| match(If cmp (CmpL op1 op2)); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "cb$cmp $op1, $labl # long" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = |
| ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ; |
| __ tbr(cond, $op1$$Register, 63, *L, /*far*/true); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| %} |
| |
| instruct far_cmpI_branch_sign(cmpOpLtGe cmp, iRegIorL2I op1, immI0 op2, label labl) %{ |
| match(If cmp (CmpI op1 op2)); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "cb$cmp $op1, $labl # int" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = |
| ((Assembler::Condition)$cmp$$cmpcode == Assembler::LT) ? Assembler::NE : Assembler::EQ; |
| __ tbr(cond, $op1$$Register, 31, *L, /*far*/true); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| %} |
| |
| instruct far_cmpL_branch_bit(cmpOpEqNe cmp, iRegL op1, immL op2, immL0 op3, label labl) %{ |
| match(If cmp (CmpL (AndL op1 op2) op3)); |
| predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_long())); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "tb$cmp $op1, $op2, $labl" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| int bit = exact_log2($op2$$constant); |
| __ tbr(cond, $op1$$Register, bit, *L, /*far*/true); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| %} |
| |
| instruct far_cmpI_branch_bit(cmpOpEqNe cmp, iRegIorL2I op1, immI op2, immI0 op3, label labl) %{ |
| match(If cmp (CmpI (AndI op1 op2) op3)); |
| predicate(is_power_of_2(n->in(2)->in(1)->in(2)->get_int())); |
| effect(USE labl); |
| |
| ins_cost(BRANCH_COST); |
| format %{ "tb$cmp $op1, $op2, $labl" %} |
| ins_encode %{ |
| Label* L = $labl$$label; |
| Assembler::Condition cond = (Assembler::Condition)$cmp$$cmpcode; |
| int bit = exact_log2($op2$$constant); |
| __ tbr(cond, $op1$$Register, bit, *L, /*far*/true); |
| %} |
| ins_pipe(pipe_cmp_branch); |
| %} |
| |
| // Test bits |
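| // |
| // A masked test such as |
| // |
| //   if ((flags & 0x10) == 0) { ... } |
| // |
| // matches here when the mask is encodable as a logical immediate and |
| // emits a single tst (an ands whose destination is the zero register), |
| // leaving the branch to consume the Z flag. |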
| |
| instruct cmpL_and(cmpOp cmp, iRegL op1, immL op2, immL0 op3, rFlagsReg cr) %{ |
| match(Set cr (CmpL (AndL op1 op2) op3)); |
| predicate(Assembler::operand_valid_for_logical_immediate |
| (/*is_32*/false, n->in(1)->in(2)->get_long())); |
| |
| ins_cost(INSN_COST); |
| format %{ "tst $op1, $op2 # long" %} |
| ins_encode %{ |
| __ tst($op1$$Register, $op2$$constant); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct cmpI_and(cmpOp cmp, iRegIorL2I op1, immI op2, immI0 op3, rFlagsReg cr) %{ |
| match(Set cr (CmpI (AndI op1 op2) op3)); |
| predicate(Assembler::operand_valid_for_logical_immediate |
| (/*is_32*/true, n->in(1)->in(2)->get_int())); |
| |
| ins_cost(INSN_COST); |
| format %{ "tst $op1, $op2 # int" %} |
| ins_encode %{ |
| __ tstw($op1$$Register, $op2$$constant); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct cmpL_and_reg(cmpOp cmp, iRegL op1, iRegL op2, immL0 op3, rFlagsReg cr) %{ |
| match(Set cr (CmpL (AndL op1 op2) op3)); |
| |
| ins_cost(INSN_COST); |
| format %{ "tst $op1, $op2 # long" %} |
| ins_encode %{ |
| __ tst($op1$$Register, $op2$$Register); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| instruct cmpI_and_reg(cmpOp cmp, iRegIorL2I op1, iRegIorL2I op2, immI0 op3, rFlagsReg cr) %{ |
| match(Set cr (CmpI (AndI op1 op2) op3)); |
| |
| ins_cost(INSN_COST); |
| format %{ "tstw $op1, $op2 # int" %} |
| ins_encode %{ |
| __ tstw($op1$$Register, $op2$$Register); |
| %} |
| ins_pipe(ialu_reg_reg); |
| %} |
| |
| |
| // Conditional Far Branch |
| // Conditional Far Branch Unsigned |
| // TODO: fixme |
| |
| // counted loop end branch near |
| instruct branchLoopEnd(cmpOp cmp, rFlagsReg cr, label lbl) |
| %{ |
| match(CountedLoopEnd cmp cr); |
| |
| effect(USE lbl); |
| |
| ins_cost(BRANCH_COST); |
| // short variant. |
| // ins_short_branch(1); |
| format %{ "b$cmp $lbl \t// counted loop end" %} |
| |
| ins_encode(aarch64_enc_br_con(cmp, lbl)); |
| |
| ins_pipe(pipe_branch); |
| %} |
| |
| // counted loop end branch near Unsigned |
| instruct branchLoopEndU(cmpOpU cmp, rFlagsRegU cr, label lbl) |
| %{ |
| match(CountedLoopEnd cmp cr); |
| |
| effect(USE lbl); |
| |
| ins_cost(BRANCH_COST); |
| // short variant. |
| // ins_short_branch(1); |
| format %{ "b$cmp $lbl \t// counted loop end unsigned" %} |
| |
| ins_encode(aarch64_enc_br_conU(cmp, lbl)); |
| |
| ins_pipe(pipe_branch); |
| %} |
| |
| // counted loop end branch far |
| // counted loop end branch far unsigned |
| // TODO: fixme |
| |
| // ============================================================================ |
| // inlined locking and unlocking |
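| // |
| // FastLock and FastUnlock implement only the uncontended fast path: |
| // roughly, a compare-and-swap on the object's mark word, leaving the |
| // flags EQ on success and NE on failure so that the consuming branch |
| // can divert to the runtime slow path. The actual sequences are |
| // emitted by aarch64_enc_fast_lock and aarch64_enc_fast_unlock, |
| // defined earlier in this file. |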
| |
| instruct cmpFastLock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2) |
| %{ |
| match(Set cr (FastLock object box)); |
| effect(TEMP tmp, TEMP tmp2); |
| |
| // TODO |
| // identify correct cost |
| ins_cost(5 * INSN_COST); |
| format %{ "fastlock $object,$box\t! kills $tmp,$tmp2" %} |
| |
| ins_encode(aarch64_enc_fast_lock(object, box, tmp, tmp2)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| instruct cmpFastUnlock(rFlagsReg cr, iRegP object, iRegP box, iRegPNoSp tmp, iRegPNoSp tmp2) |
| %{ |
| match(Set cr (FastUnlock object box)); |
| effect(TEMP tmp, TEMP tmp2); |
| |
| ins_cost(5 * INSN_COST); |
| format %{ "fastunlock $object,$box\t! kills $tmp, $tmp2" %} |
| |
| ins_encode(aarch64_enc_fast_unlock(object, box, tmp, tmp2)); |
| |
| ins_pipe(pipe_serial); |
| %} |
| |
| |
| // ============================================================================ |
| // Safepoint Instructions |
| |
| // TODO |
| // provide a near and far version of this code |
| |
| instruct safePoint(iRegP poll) |
| %{ |
| match(SafePoint poll); |
| |
| format %{ |
| "ldrw zr, [$poll]\t# Safepoint: poll for GC" |
| %} |
| ins_encode %{ |
| __ read_polling_page(as_Register($poll$$reg), relocInfo::poll_type); |
| %} |
| ins_pipe(pipe_serial); // ins_pipe(iload_reg_mem); |
| %} |
| |
| |
| // ============================================================================ |
| // Procedure Call/Return Instructions |
| |
| // Call Java Static Instruction |
| |
| instruct CallStaticJavaDirect(method meth) |
| %{ |
| match(CallStaticJava); |
| |
| effect(USE meth); |
| |
| ins_cost(CALL_COST); |
| |
| format %{ "call,static $meth \t// ==> " %} |
| |
| ins_encode( aarch64_enc_java_static_call(meth), |
| aarch64_enc_call_epilog ); |
| |
| ins_pipe(pipe_class_call); |
| %} |
| |
| // Call Java Dynamic Instruction |
| instruct CallDynamicJavaDirect(method meth) |
| %{ |
| match(CallDynamicJava); |
| |
| effect(USE meth); |
| |
| ins_cost(CALL_COST); |
| |
| format %{ "CALL,dynamic $meth \t// ==> " %} |
| |
| ins_encode( aarch64_enc_java_dynamic_call(meth), |
| aarch64_enc_call_epilog ); |
| |
| ins_pipe(pipe_class_call); |
| %} |
| |
| // Call Runtime Instruction |
| |
| instruct CallRuntimeDirect(method meth) |
| %{ |
| match(CallRuntime); |
| |
| effect(USE meth); |
| |
| ins_cost(CALL_COST); |
| |
| format %{ "CALL, runtime $meth" %} |
| |
| ins_encode( aarch64_enc_java_to_runtime(meth) ); |
| |
| ins_pipe(pipe_class_call); |
| %} |
| |
| // Call Runtime Instruction |
| |
| instruct CallLeafDirect(method meth) |
| %{ |
| match(CallLeaf); |
| |
| effect(USE meth); |
| |
| ins_cost(CALL_COST); |
| |
| format %{ "CALL, runtime leaf $meth" %} |
| |
| ins_encode( aarch64_enc_java_to_runtime(meth) ); |
| |
| ins_pipe(pipe_class_call); |
| %} |
| |
| // Call Runtime Instruction |
| |
| instruct CallLeafNoFPDirect(method meth) |
| %{ |
| match(CallLeafNoFP); |
| |
| effect(USE meth); |
| |
| ins_cost(CALL_COST); |
| |
| format %{ "CALL, runtime leaf nofp $meth" %} |
| |
| ins_encode( aarch64_enc_java_to_runtime(meth) ); |
| |
| ins_pipe(pipe_class_call); |
| %} |
| |
| // Tail Call; Jump from runtime stub to Java code. |
| // Also known as an 'interprocedural jump'. |
| // Target of jump will eventually return to caller. |
| // TailJump below removes the return address. |
| instruct TailCalljmpInd(iRegPNoSp jump_target, inline_cache_RegP method_oop) |
| %{ |
| match(TailCall jump_target method_oop); |
| |
| ins_cost(CALL_COST); |
| |
| format %{ "br $jump_target\t# $method_oop holds method oop" %} |
| |
| ins_encode(aarch64_enc_tail_call(jump_target)); |
| |
| ins_pipe(pipe_class_call); |
| %} |
| |
| instruct TailjmpInd(iRegPNoSp jump_target, iRegP_R0 ex_oop) |
| %{ |
| match(TailJump jump_target ex_oop); |
| |
| ins_cost(CALL_COST); |
| |
| format %{ "br $jump_target\t# $ex_oop holds exception oop" %} |
| |
| ins_encode(aarch64_enc_tail_jmp(jump_target)); |
| |
| ins_pipe(pipe_class_call); |
| %} |
| |
| // Create exception oop: created by stack-crawling runtime code. |
| // The created exception is now available to this handler and is set |
| // up just prior to jumping to this handler. No code is emitted. |
| // TODO check |
| // should ex_oop be in r0? intel uses rax, ppc cannot use r0 so uses rarg1 |
| instruct CreateException(iRegP_R0 ex_oop) |
| %{ |
| match(Set ex_oop (CreateEx)); |
| |
| format %{ " -- \t// exception oop; no code emitted" %} |
| |
| size(0); |
| |
| ins_encode( /*empty*/ ); |
| |
| ins_pipe(pipe_class_empty); |
| %} |
| |
| // Rethrow exception: The exception oop will come in the first |
| // argument position. Then JUMP (not call) to the rethrow stub code. |
| instruct RethrowException() %{ |
| match(Rethrow); |
| ins_cost(CALL_COST); |
| |
| format %{ "b rethrow_stub" %} |
| |
| ins_encode( aarch64_enc_rethrow() ); |
| |
| ins_pipe(pipe_class_call); |
| %} |
| |
| |
| // Return Instruction |
| // epilog node loads ret address into lr as part of frame pop |
| instruct Ret() |
| %{ |
| match(Return); |
| |
| format %{ "ret\t// return register" %} |
| |
| ins_encode( aarch64_enc_ret() ); |
| |
| ins_pipe(pipe_branch); |
| %} |
| |
| // Die now. |
| instruct ShouldNotReachHere() %{ |
| match(Halt); |
| |
| ins_cost(CALL_COST); |
| format %{ "ShouldNotReachHere" %} |
| |
| ins_encode %{ |
| // +1 so NativeInstruction::is_sigill_zombie_not_entrant() doesn't |
| // return true |
| __ dpcs1(0xdead + 1); |
| %} |
| |
| ins_pipe(pipe_class_default); |
| %} |
| |
| // ============================================================================ |
| // Partial Subtype Check |
| // |
| // Search the subklass's array of secondary supertypes (see |
| // Klass::_secondary_supers) for an instance of the superklass. Set a |
| // hidden internal cache on a hit (the cache is checked with exposed |
| // code in gen_subtype_check()). Return NZ for a miss or zero for a |
| // hit. The encoding ALSO sets flags. |
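| // |
| // This is the out-of-line part of a checkcast/instanceof/aastore type |
| // check: the quick test against the primary super display (and the |
| // cached secondary super) has already failed, so we scan the secondary |
| // supers array, e.g. for |
| // |
| //   if (obj instanceof SomeInterface) { ... } |
| // |
| // when the interface is not the one currently cached for the class. |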
| |
| instruct partialSubtypeCheck(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, rFlagsReg cr) |
| %{ |
| match(Set result (PartialSubtypeCheck sub super)); |
| effect(KILL cr, KILL temp); |
| |
| ins_cost(1100); // slightly larger than the next version |
| format %{ "partialSubtypeCheck $result, $sub, $super" %} |
| |
| ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result)); |
| |
| opcode(0x1); // Force zero of result reg on hit |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct partialSubtypeCheckVsZero(iRegP_R4 sub, iRegP_R0 super, iRegP_R2 temp, iRegP_R5 result, immP0 zero, rFlagsReg cr) |
| %{ |
| match(Set cr (CmpP (PartialSubtypeCheck sub super) zero)); |
| effect(KILL temp, KILL result); |
| |
| ins_cost(1100); // slightly larger than the next version |
| format %{ "partialSubtypeCheck $result, $sub, $super == 0" %} |
| |
| ins_encode(aarch64_enc_partial_subtype_check(sub, super, temp, result)); |
| |
| opcode(0x0); // Don't zero result reg on hit |
| |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_compareU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2, |
| iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr) |
| %{ |
| predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UU); |
| match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); |
| effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); |
| |
| format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # KILL $tmp1" %} |
| ins_encode %{ |
| // Count is in 8-bit bytes; non-Compact chars are 16 bits. |
| __ string_compare($str1$$Register, $str2$$Register, |
| $cnt1$$Register, $cnt2$$Register, $result$$Register, |
| $tmp1$$Register, |
| fnoreg, fnoreg, StrIntrinsicNode::UU); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_compareL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2, |
| iRegI_R0 result, iRegP_R10 tmp1, rFlagsReg cr) |
| %{ |
| predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LL); |
| match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); |
| effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, KILL cr); |
| |
| format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # KILL $tmp1" %} |
| ins_encode %{ |
| __ string_compare($str1$$Register, $str2$$Register, |
| $cnt1$$Register, $cnt2$$Register, $result$$Register, |
| $tmp1$$Register, |
| fnoreg, fnoreg, StrIntrinsicNode::LL); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_compareUL(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2, |
| iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr) |
| %{ |
| predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::UL); |
| match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); |
| effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr); |
| |
| format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # KILL $tmp1" %} |
| ins_encode %{ |
| __ string_compare($str1$$Register, $str2$$Register, |
| $cnt1$$Register, $cnt2$$Register, $result$$Register, |
| $tmp1$$Register, |
| $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::UL); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_compareLU(iRegP_R1 str1, iRegI_R2 cnt1, iRegP_R3 str2, iRegI_R4 cnt2, |
| iRegI_R0 result, vRegD vtmp1, vRegD vtmp2, iRegP_R10 tmp1, rFlagsReg cr) |
| %{ |
| predicate(((StrCompNode*)n)->encoding() == StrIntrinsicNode::LU); |
| match(Set result (StrComp (Binary str1 cnt1) (Binary str2 cnt2))); |
| effect(KILL tmp1, USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, TEMP vtmp1, TEMP vtmp2, KILL cr); |
| |
| format %{ "String Compare $str1,$cnt1,$str2,$cnt2 -> $result # KILL $tmp1" %} |
| ins_encode %{ |
| __ string_compare($str1$$Register, $str2$$Register, |
| $cnt1$$Register, $cnt2$$Register, $result$$Register, |
| $tmp1$$Register, |
| $vtmp1$$FloatRegister, $vtmp2$$FloatRegister, StrIntrinsicNode::LU); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_indexofUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2, |
| iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) |
| %{ |
| predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU); |
| match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); |
| effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, |
| TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); |
| format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UU)" %} |
| |
| ins_encode %{ |
| __ string_indexof($str1$$Register, $str2$$Register, |
| $cnt1$$Register, $cnt2$$Register, |
| $tmp1$$Register, $tmp2$$Register, |
| $tmp3$$Register, $tmp4$$Register, |
| -1, $result$$Register, StrIntrinsicNode::UU); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_indexofLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2, |
| iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) |
| %{ |
| predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL); |
| match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); |
| effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, |
| TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); |
| format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LL)" %} |
| |
| ins_encode %{ |
| __ string_indexof($str1$$Register, $str2$$Register, |
| $cnt1$$Register, $cnt2$$Register, |
| $tmp1$$Register, $tmp2$$Register, |
| $tmp3$$Register, $tmp4$$Register, |
| -1, $result$$Register, StrIntrinsicNode::LL); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_indexofUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2, |
| iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) |
| %{ |
| predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL); |
| match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); |
| effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, |
| TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); |
| format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (UL)" %} |
| |
| ins_encode %{ |
| __ string_indexof($str1$$Register, $str2$$Register, |
| $cnt1$$Register, $cnt2$$Register, |
| $tmp1$$Register, $tmp2$$Register, |
| $tmp3$$Register, $tmp4$$Register, |
| -1, $result$$Register, StrIntrinsicNode::UL); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_indexofLU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, iRegI_R2 cnt2, |
| iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) |
| %{ |
| predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LU); |
| match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 cnt2))); |
| effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, USE_KILL cnt2, |
| TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); |
| format %{ "String IndexOf $str1,$cnt1,$str2,$cnt2 -> $result (LU)" %} |
| |
| ins_encode %{ |
| __ string_indexof($str1$$Register, $str2$$Register, |
| $cnt1$$Register, $cnt2$$Register, |
| $tmp1$$Register, $tmp2$$Register, |
| $tmp3$$Register, $tmp4$$Register, |
| -1, $result$$Register, StrIntrinsicNode::LU); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_indexof_conUU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, |
| immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, |
| iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) |
| %{ |
| predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UU); |
| match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); |
| effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, |
| TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); |
| format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UU)" %} |
| |
| ins_encode %{ |
| int icnt2 = (int)$int_cnt2$$constant; |
| __ string_indexof($str1$$Register, $str2$$Register, |
| $cnt1$$Register, zr, |
| $tmp1$$Register, $tmp2$$Register, |
| $tmp3$$Register, $tmp4$$Register, |
| icnt2, $result$$Register, StrIntrinsicNode::UU); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_indexof_conLL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, |
| immI_le_4 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, |
| iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) |
| %{ |
| predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LL); |
| match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); |
| effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, |
| TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); |
| format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LL)" %} |
| |
| ins_encode %{ |
| int icnt2 = (int)$int_cnt2$$constant; |
| __ string_indexof($str1$$Register, $str2$$Register, |
| $cnt1$$Register, zr, |
| $tmp1$$Register, $tmp2$$Register, |
| $tmp3$$Register, $tmp4$$Register, |
| icnt2, $result$$Register, StrIntrinsicNode::LL); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_indexof_conUL(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, |
| immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, |
| iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) |
| %{ |
| predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::UL); |
| match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); |
| effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, |
| TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); |
| format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (UL)" %} |
| |
| ins_encode %{ |
| int icnt2 = (int)$int_cnt2$$constant; |
| __ string_indexof($str1$$Register, $str2$$Register, |
| $cnt1$$Register, zr, |
| $tmp1$$Register, $tmp2$$Register, |
| $tmp3$$Register, $tmp4$$Register, |
| icnt2, $result$$Register, StrIntrinsicNode::UL); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_indexof_conLU(iRegP_R1 str1, iRegI_R4 cnt1, iRegP_R3 str2, |
| immI_1 int_cnt2, iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, |
| iRegINoSp tmp3, iRegINoSp tmp4, rFlagsReg cr) |
| %{ |
| predicate(((StrIndexOfNode*)n)->encoding() == StrIntrinsicNode::LU); |
| match(Set result (StrIndexOf (Binary str1 cnt1) (Binary str2 int_cnt2))); |
| effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt1, |
| TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, KILL cr); |
| format %{ "String IndexOf $str1,$cnt1,$str2,$int_cnt2 -> $result (LU)" %} |
| |
| ins_encode %{ |
| int icnt2 = (int)$int_cnt2$$constant; |
| __ string_indexof($str1$$Register, $str2$$Register, |
| $cnt1$$Register, zr, |
| $tmp1$$Register, $tmp2$$Register, |
| $tmp3$$Register, $tmp4$$Register, |
| icnt2, $result$$Register, StrIntrinsicNode::LU); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_indexofU_char(iRegP_R1 str1, iRegI_R2 cnt1, iRegI_R3 ch, |
| iRegI_R0 result, iRegINoSp tmp1, iRegINoSp tmp2, |
| iRegINoSp tmp3, rFlagsReg cr) |
| %{ |
| match(Set result (StrIndexOfChar (Binary str1 cnt1) ch)); |
| effect(USE_KILL str1, USE_KILL cnt1, USE_KILL ch, |
| TEMP tmp1, TEMP tmp2, TEMP tmp3, KILL cr); |
| |
| format %{ "String IndexOf char[] $str1,$cnt1,$ch -> $result" %} |
| |
| ins_encode %{ |
| __ string_indexof_char($str1$$Register, $cnt1$$Register, $ch$$Register, |
| $result$$Register, $tmp1$$Register, $tmp2$$Register, |
| $tmp3$$Register); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_equalsL(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt, |
| iRegI_R0 result, rFlagsReg cr) |
| %{ |
| predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::LL); |
| match(Set result (StrEquals (Binary str1 str2) cnt)); |
| effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr); |
| |
| format %{ "String Equals $str1,$str2,$cnt -> $result" %} |
| ins_encode %{ |
| // Count is in 8-bit bytes; non-Compact chars are 16 bits. |
| __ arrays_equals($str1$$Register, $str2$$Register, |
| $result$$Register, $cnt$$Register, |
| 1, /*is_string*/true); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct string_equalsU(iRegP_R1 str1, iRegP_R3 str2, iRegI_R4 cnt, |
| iRegI_R0 result, rFlagsReg cr) |
| %{ |
| predicate(((StrEqualsNode*)n)->encoding() == StrIntrinsicNode::UU); |
| match(Set result (StrEquals (Binary str1 str2) cnt)); |
| effect(USE_KILL str1, USE_KILL str2, USE_KILL cnt, KILL cr); |
| |
| format %{ "String Equals $str1,$str2,$cnt -> $result" %} |
| ins_encode %{ |
| // Count is in 8-bit bytes; non-Compact chars are 16 bits. |
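| // Halve the byte count to get the char count before comparing. |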
| __ asrw($cnt$$Register, $cnt$$Register, 1); |
| __ arrays_equals($str1$$Register, $str2$$Register, |
| $result$$Register, $cnt$$Register, |
| 2, /*is_string*/true); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct array_equalsB(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result, |
| iRegP_R10 tmp, rFlagsReg cr) |
| %{ |
| predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::LL); |
| match(Set result (AryEq ary1 ary2)); |
| effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr); |
| |
| format %{ "Array Equals $ary1,ary2 -> $result // KILL $tmp" %} |
| ins_encode %{ |
| __ arrays_equals($ary1$$Register, $ary2$$Register, |
| $result$$Register, $tmp$$Register, |
| 1, /*is_string*/false); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct array_equalsC(iRegP_R1 ary1, iRegP_R2 ary2, iRegI_R0 result, |
| iRegP_R10 tmp, rFlagsReg cr) |
| %{ |
| predicate(((AryEqNode*)n)->encoding() == StrIntrinsicNode::UU); |
| match(Set result (AryEq ary1 ary2)); |
| effect(KILL tmp, USE_KILL ary1, USE_KILL ary2, KILL cr); |
| |
| format %{ "Array Equals $ary1,ary2 -> $result // KILL $tmp" %} |
| ins_encode %{ |
| __ arrays_equals($ary1$$Register, $ary2$$Register, |
| $result$$Register, $tmp$$Register, |
| 2, /*is_string*/false); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| instruct has_negatives(iRegP_R1 ary1, iRegI_R2 len, iRegI_R0 result, rFlagsReg cr) |
| %{ |
| match(Set result (HasNegatives ary1 len)); |
| effect(USE_KILL ary1, USE_KILL len, KILL cr); |
| format %{ "has negatives byte[] $ary1,$len -> $result" %} |
| ins_encode %{ |
| __ has_negatives($ary1$$Register, $len$$Register, $result$$Register); |
| %} |
| ins_pipe( pipe_slow ); |
| %} |
| |
| // fast char[] to byte[] compression |
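| // |
| // A scalar sketch of the intended contract; the vectorized code in |
| // MacroAssembler::char_array_compress() is authoritative: |
| // |
| //   int compress(jchar* src, jbyte* dst, int len) { |
| //     for (int i = 0; i < len; i++) { |
| //       jchar c = src[i]; |
| //       if (c > 0xff) return 0; // char does not fit: report failure |
| //       dst[i] = (jbyte)c; |
| //     } |
| //     return len;               // success: result == len |
| //   } |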
| instruct string_compress(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len, |
| vRegD_V0 tmp1, vRegD_V1 tmp2, |
| vRegD_V2 tmp3, vRegD_V3 tmp4, |
| iRegI_R0 result, rFlagsReg cr) |
| %{ |
| match(Set result (StrCompressedCopy src (Binary dst len))); |
| effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr); |
| |
| format %{ "String Compress $src,$dst -> $result // KILL R1, R2, R3, R4" %} |
| ins_encode %{ |
| __ char_array_compress($src$$Register, $dst$$Register, $len$$Register, |
| $tmp1$$FloatRegister, $tmp2$$FloatRegister, |
| $tmp3$$FloatRegister, $tmp4$$FloatRegister, |
| $result$$Register); |
| %} |
| ins_pipe( pipe_slow ); |
| %} |
| |
| // fast byte[] to char[] inflation |
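| // |
| // Scalar equivalent of the operation (the real code below is |
| // vectorized): |
| // |
| //   void inflate(jbyte* src, jchar* dst, int len) { |
| //     for (int i = 0; i < len; i++) { |
| //       dst[i] = (jchar)(src[i] & 0xff); // zero-extend each byte |
| //     } |
| //   } |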
| instruct string_inflate(Universe dummy, iRegP_R0 src, iRegP_R1 dst, iRegI_R2 len, |
| vRegD tmp1, vRegD tmp2, vRegD tmp3, iRegP_R3 tmp4, rFlagsReg cr) |
| %{ |
| match(Set dummy (StrInflatedCopy src (Binary dst len))); |
| effect(TEMP tmp1, TEMP tmp2, TEMP tmp3, TEMP tmp4, USE_KILL src, USE_KILL dst, USE_KILL len, KILL cr); |
| |
| format %{ "String Inflate $src,$dst // KILL $tmp1, $tmp2" %} |
| ins_encode %{ |
| __ byte_array_inflate($src$$Register, $dst$$Register, $len$$Register, |
| $tmp1$$FloatRegister, $tmp2$$FloatRegister, $tmp3$$FloatRegister, $tmp4$$Register); |
| %} |
| ins_pipe(pipe_class_memory); |
| %} |
| |
| // encode char[] to byte[] in ISO_8859_1 |
| instruct encode_iso_array(iRegP_R2 src, iRegP_R1 dst, iRegI_R3 len, |
| vRegD_V0 Vtmp1, vRegD_V1 Vtmp2, |
| vRegD_V2 Vtmp3, vRegD_V3 Vtmp4, |
| iRegI_R0 result, rFlagsReg cr) |
| %{ |
| match(Set result (EncodeISOArray src (Binary dst len))); |
| effect(USE_KILL src, USE_KILL dst, USE_KILL len, |
| KILL Vtmp1, KILL Vtmp2, KILL Vtmp3, KILL Vtmp4, KILL cr); |
| |
| format %{ "Encode array $src,$dst,$len -> $result" %} |
| ins_encode %{ |
| __ encode_iso_array($src$$Register, $dst$$Register, $len$$Register, |
| $result$$Register, $Vtmp1$$FloatRegister, $Vtmp2$$FloatRegister, |
| $Vtmp3$$FloatRegister, $Vtmp4$$FloatRegister); |
| %} |
| ins_pipe( pipe_class_memory ); |
| %} |
| |
| // ============================================================================ |
| // This name is KNOWN by the ADLC and cannot be changed. |
| // The ADLC forces a 'TypeRawPtr::BOTTOM' output type |
| // for this node. |
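| // The encoding is empty because this port dedicates a register |
| // (rthread, r28) to the current JavaThread, so the value is already |
| // in the register the allocator hands out for thread_RegP. |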
| instruct tlsLoadP(thread_RegP dst) |
| %{ |
| match(Set dst (ThreadLocal)); |
| |
| ins_cost(0); |
| |
| format %{ " -- \t// $dst=Thread::current(), empty" %} |
| |
| size(0); |
| |
| ins_encode( /*empty*/ ); |
| |
| ins_pipe(pipe_class_empty); |
| %} |
| |
| // ====================VECTOR INSTRUCTIONS===================================== |
| |
| // Load vector (32 bits) |
| instruct loadV4(vecD dst, vmem4 mem) |
| %{ |
| predicate(n->as_LoadVector()->memory_size() == 4); |
| match(Set dst (LoadVector mem)); |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrs $dst,$mem\t# vector (32 bits)" %} |
| ins_encode( aarch64_enc_ldrvS(dst, mem) ); |
| ins_pipe(vload_reg_mem64); |
| %} |
| |
| // Load vector (64 bits) |
| instruct loadV8(vecD dst, vmem8 mem) |
| %{ |
| predicate(n->as_LoadVector()->memory_size() == 8); |
| match(Set dst (LoadVector mem)); |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrd $dst,$mem\t# vector (64 bits)" %} |
| ins_encode( aarch64_enc_ldrvD(dst, mem) ); |
| ins_pipe(vload_reg_mem64); |
| %} |
| |
| // Load Vector (128 bits) |
| instruct loadV16(vecX dst, vmem16 mem) |
| %{ |
| predicate(n->as_LoadVector()->memory_size() == 16); |
| match(Set dst (LoadVector mem)); |
| ins_cost(4 * INSN_COST); |
| format %{ "ldrq $dst,$mem\t# vector (128 bits)" %} |
| ins_encode( aarch64_enc_ldrvQ(dst, mem) ); |
| ins_pipe(vload_reg_mem128); |
| %} |
| |
| // Store Vector (32 bits) |
| instruct storeV4(vecD src, vmem4 mem) |
| %{ |
| predicate(n->as_StoreVector()->memory_size() == 4); |
| match(Set mem (StoreVector mem src)); |
| ins_cost(4 * INSN_COST); |
| format %{ "strs $mem,$src\t# vector (32 bits)" %} |
| ins_encode( aarch64_enc_strvS(src, mem) ); |
| ins_pipe(vstore_reg_mem64); |
| %} |
| |
| // Store Vector (64 bits) |
| instruct storeV8(vecD src, vmem8 mem) |
| %{ |
| predicate(n->as_StoreVector()->memory_size() == 8); |
| match(Set mem (StoreVector mem src)); |
| ins_cost(4 * INSN_COST); |
| format %{ "strd $mem,$src\t# vector (64 bits)" %} |
| ins_encode( aarch64_enc_strvD(src, mem) ); |
| ins_pipe(vstore_reg_mem64); |
| %} |
| |
| // Store Vector (128 bits) |
| instruct storeV16(vecX src, vmem16 mem) |
| %{ |
| predicate(n->as_StoreVector()->memory_size() == 16); |
| match(Set mem (StoreVector mem src)); |
| ins_cost(4 * INSN_COST); |
| format %{ "strq $mem,$src\t# vector (128 bits)" %} |
| ins_encode( aarch64_enc_strvQ(src, mem) ); |
| ins_pipe(vstore_reg_mem128); |
| %} |
| |
| instruct replicate8B(vecD dst, iRegIorL2I src) |
| %{ |
| predicate(n->as_Vector()->length() == 4 || |
| n->as_Vector()->length() == 8); |
| match(Set dst (ReplicateB src)); |
| ins_cost(INSN_COST); |
| format %{ "dup $dst, $src\t# vector (8B)" %} |
| ins_encode %{ |
| __ dup(as_FloatRegister($dst$$reg), __ T8B, as_Register($src$$reg)); |
| %} |
| ins_pipe(vdup_reg_reg64); |
| %} |
| |
| instruct replicate16B(vecX dst, iRegIorL2I src) |
| %{ |
| predicate(n->as_Vector()->length() == 16); |
| match(Set dst (ReplicateB src)); |
| ins_cost(INSN_COST); |
| format %{ "dup $dst, $src\t# vector (16B)" %} |
| ins_encode %{ |
| __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($src$$reg)); |
| %} |
| ins_pipe(vdup_reg_reg128); |
| %} |
| |
| instruct replicate8B_imm(vecD dst, immI con) |
| %{ |
| predicate(n->as_Vector()->length() == 4 || |
| n->as_Vector()->length() == 8); |
| match(Set dst (ReplicateB con)); |
| ins_cost(INSN_COST); |
| format %{ "movi $dst, $con\t# vector(8B)" %} |
| ins_encode %{ |
| __ mov(as_FloatRegister($dst$$reg), __ T8B, $con$$constant & 0xff); |
| %} |
| ins_pipe(vmovi_reg_imm64); |
| %} |
| |
| instruct replicate16B_imm(vecX dst, immI con) |
| %{ |
| predicate(n->as_Vector()->length() == 16); |
| match(Set dst (ReplicateB con)); |
| ins_cost(INSN_COST); |
| format %{ "movi $dst, $con\t# vector(16B)" %} |
| ins_encode %{ |
| __ mov(as_FloatRegister($dst$$reg), __ T16B, $con$$constant & 0xff); |
| %} |
| ins_pipe(vmovi_reg_imm128); |
| %} |
| |
| instruct replicate4S(vecD dst, iRegIorL2I src) |
| %{ |
| predicate(n->as_Vector()->length() == 2 || |
| n->as_Vector()->length() == 4); |
| match(Set dst (ReplicateS src)); |
| ins_cost(INSN_COST); |
| format %{ "dup $dst, $src\t# vector (4S)" %} |
| ins_encode %{ |
| __ dup(as_FloatRegister($dst$$reg), __ T4H, as_Register($src$$reg)); |
| %} |
| ins_pipe(vdup_reg_reg64); |
| %} |
| |
| instruct replicate8S(vecX dst, iRegIorL2I src) |
| %{ |
| predicate(n->as_Vector()->length() == 8); |
| match(Set dst (ReplicateS src)); |
| ins_cost(INSN_COST); |
| format %{ "dup $dst, $src\t# vector (8S)" %} |
| ins_encode %{ |
| __ dup(as_FloatRegister($dst$$reg), __ T8H, as_Register($src$$reg)); |
| %} |
| ins_pipe(vdup_reg_reg128); |
| %} |
| |
| instruct replicate4S_imm(vecD dst, immI con) |
| %{ |
| predicate(n->as_Vector()->length() == 2 || |
| n->as_Vector()->length() == 4); |
| match(Set dst (ReplicateS con)); |
| ins_cost(INSN_COST); |
| format %{ "movi $dst, $con\t# vector(4H)" %} |
| ins_encode %{ |
| __ mov(as_FloatRegister($dst$$reg), __ T4H, $con$$constant & 0xffff); |
| %} |
| ins_pipe(vmovi_reg_imm64); |
| %} |
| |
| instruct replicate8S_imm(vecX dst, immI con) |
| %{ |
| predicate(n->as_Vector()->length() == 8); |
| match(Set dst (ReplicateS con)); |
| ins_cost(INSN_COST); |
| format %{ "movi $dst, $con\t# vector(8H)" %} |
| ins_encode %{ |
| __ mov(as_FloatRegister($dst$$reg), __ T8H, $con$$constant & 0xffff); |
| %} |
| ins_pipe(vmovi_reg_imm128); |
| %} |
| |
| instruct replicate2I(vecD dst, iRegIorL2I src) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (ReplicateI src)); |
| ins_cost(INSN_COST); |
| format %{ "dup $dst, $src\t# vector (2I)" %} |
| ins_encode %{ |
| __ dup(as_FloatRegister($dst$$reg), __ T2S, as_Register($src$$reg)); |
| %} |
| ins_pipe(vdup_reg_reg64); |
| %} |
| |
| instruct replicate4I(vecX dst, iRegIorL2I src) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (ReplicateI src)); |
| ins_cost(INSN_COST); |
| format %{ "dup $dst, $src\t# vector (4I)" %} |
| ins_encode %{ |
| __ dup(as_FloatRegister($dst$$reg), __ T4S, as_Register($src$$reg)); |
| %} |
| ins_pipe(vdup_reg_reg128); |
| %} |
| |
| instruct replicate2I_imm(vecD dst, immI con) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (ReplicateI con)); |
| ins_cost(INSN_COST); |
| format %{ "movi $dst, $con\t# vector(2I)" %} |
| ins_encode %{ |
| __ mov(as_FloatRegister($dst$$reg), __ T2S, $con$$constant); |
| %} |
| ins_pipe(vmovi_reg_imm64); |
| %} |
| |
| instruct replicate4I_imm(vecX dst, immI con) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (ReplicateI con)); |
| ins_cost(INSN_COST); |
| format %{ "movi $dst, $con\t# vector(4I)" %} |
| ins_encode %{ |
| __ mov(as_FloatRegister($dst$$reg), __ T4S, $con$$constant); |
| %} |
| ins_pipe(vmovi_reg_imm128); |
| %} |
| |
| instruct replicate2L(vecX dst, iRegL src) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (ReplicateL src)); |
| ins_cost(INSN_COST); |
| format %{ "dup $dst, $src\t# vector (2L)" %} |
| ins_encode %{ |
| __ dup(as_FloatRegister($dst$$reg), __ T2D, as_Register($src$$reg)); |
| %} |
| ins_pipe(vdup_reg_reg128); |
| %} |
| |
| instruct replicate2L_zero(vecX dst, immI0 zero) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (ReplicateI zero)); |
| ins_cost(INSN_COST); |
| format %{ "movi $dst, $zero\t# vector(4I)" %} |
| ins_encode %{ |
| __ eor(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($dst$$reg), |
| as_FloatRegister($dst$$reg)); |
| %} |
| ins_pipe(vmovi_reg_imm128); |
| %} |
| |
| instruct replicate2F(vecD dst, vRegF src) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (ReplicateF src)); |
| ins_cost(INSN_COST); |
| format %{ "dup $dst, $src\t# vector (2F)" %} |
| ins_encode %{ |
| __ dup(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src$$reg)); |
| %} |
| ins_pipe(vdup_reg_freg64); |
| %} |
| |
| instruct replicate4F(vecX dst, vRegF src) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (ReplicateF src)); |
| ins_cost(INSN_COST); |
| format %{ "dup $dst, $src\t# vector (4F)" %} |
| ins_encode %{ |
| __ dup(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src$$reg)); |
| %} |
| ins_pipe(vdup_reg_freg128); |
| %} |
| |
| instruct replicate2D(vecX dst, vRegD src) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (ReplicateD src)); |
| ins_cost(INSN_COST); |
| format %{ "dup $dst, $src\t# vector (2D)" %} |
| ins_encode %{ |
| __ dup(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src$$reg)); |
| %} |
| ins_pipe(vdup_reg_dreg128); |
| %} |
| |
| // ====================REDUCTION ARITHMETIC==================================== |
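| // A reduction folds a scalar accumulator and a vector into a single |
| // scalar.  As a sketch, AddReductionVI computes |
| // |
| //   dst = src1 + src2[0] + src2[1] + ... + src2[n-1] |
| // |
| // and MulReductionVI the corresponding product.  The float and |
| // double forms below combine the lanes strictly left to right, |
| // preserving Java's evaluation order. |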
| |
| instruct reduce_add2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp, iRegINoSp tmp2) |
| %{ |
| match(Set dst (AddReductionVI src1 src2)); |
| ins_cost(INSN_COST); |
| effect(TEMP tmp, TEMP tmp2); |
| format %{ "umov $tmp, $src2, S, 0\n\t" |
| "umov $tmp2, $src2, S, 1\n\t" |
| "addw $dst, $src1, $tmp\n\t" |
| "addw $dst, $dst, $tmp2\t add reduction2i" |
| %} |
| ins_encode %{ |
| __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0); |
| __ umov($tmp2$$Register, as_FloatRegister($src2$$reg), __ S, 1); |
| __ addw($dst$$Register, $src1$$Register, $tmp$$Register); |
| __ addw($dst$$Register, $dst$$Register, $tmp2$$Register); |
| %} |
| ins_pipe(pipe_class_default); |
| %} |
| |
| instruct reduce_add4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2) |
| %{ |
| match(Set dst (AddReductionVI src1 src2)); |
| ins_cost(INSN_COST); |
| effect(TEMP tmp, TEMP tmp2); |
| format %{ "addv $tmp, T4S, $src2\n\t" |
| "umov $tmp2, $tmp, S, 0\n\t" |
| "addw $dst, $tmp2, $src1\t add reduction4i" |
| %} |
| ins_encode %{ |
| __ addv(as_FloatRegister($tmp$$reg), __ T4S, |
| as_FloatRegister($src2$$reg)); |
| __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0); |
| __ addw($dst$$Register, $tmp2$$Register, $src1$$Register); |
| %} |
| ins_pipe(pipe_class_default); |
| %} |
| |
| instruct reduce_mul2I(iRegINoSp dst, iRegIorL2I src1, vecD src2, iRegINoSp tmp) |
| %{ |
| match(Set dst (MulReductionVI src1 src2)); |
| ins_cost(INSN_COST); |
| effect(TEMP tmp, TEMP dst); |
| format %{ "umov $tmp, $src2, S, 0\n\t" |
| "mul $dst, $tmp, $src1\n\t" |
| "umov $tmp, $src2, S, 1\n\t" |
| "mul $dst, $tmp, $dst\t mul reduction2i\n\t" |
| %} |
| ins_encode %{ |
| __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 0); |
| __ mul($dst$$Register, $tmp$$Register, $src1$$Register); |
| __ umov($tmp$$Register, as_FloatRegister($src2$$reg), __ S, 1); |
| __ mul($dst$$Register, $tmp$$Register, $dst$$Register); |
| %} |
| ins_pipe(pipe_class_default); |
| %} |
| |
| instruct reduce_mul4I(iRegINoSp dst, iRegIorL2I src1, vecX src2, vecX tmp, iRegINoSp tmp2) |
| %{ |
| match(Set dst (MulReductionVI src1 src2)); |
| ins_cost(INSN_COST); |
| effect(TEMP tmp, TEMP tmp2, TEMP dst); |
| format %{ "ins $tmp, $src2, 0, 1\n\t" |
| "mul $tmp, $tmp, $src2\n\t" |
| "umov $tmp2, $tmp, S, 0\n\t" |
| "mul $dst, $tmp2, $src1\n\t" |
| "umov $tmp2, $tmp, S, 1\n\t" |
| "mul $dst, $tmp2, $dst\t mul reduction4i\n\t" |
| %} |
| ins_encode %{ |
| __ ins(as_FloatRegister($tmp$$reg), __ D, |
| as_FloatRegister($src2$$reg), 0, 1); |
| __ mulv(as_FloatRegister($tmp$$reg), __ T2S, |
| as_FloatRegister($tmp$$reg), as_FloatRegister($src2$$reg)); |
| __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 0); |
| __ mul($dst$$Register, $tmp2$$Register, $src1$$Register); |
| __ umov($tmp2$$Register, as_FloatRegister($tmp$$reg), __ S, 1); |
| __ mul($dst$$Register, $tmp2$$Register, $dst$$Register); |
| %} |
| ins_pipe(pipe_class_default); |
| %} |
| |
| instruct reduce_add2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) |
| %{ |
| match(Set dst (AddReductionVF src1 src2)); |
| ins_cost(INSN_COST); |
| effect(TEMP tmp, TEMP dst); |
| format %{ "fadds $dst, $src1, $src2\n\t" |
| "ins $tmp, S, $src2, 0, 1\n\t" |
| "fadds $dst, $dst, $tmp\t add reduction2f" |
| %} |
| ins_encode %{ |
| __ fadds(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg)); |
| __ ins(as_FloatRegister($tmp$$reg), __ S, |
| as_FloatRegister($src2$$reg), 0, 1); |
| __ fadds(as_FloatRegister($dst$$reg), |
| as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); |
| %} |
| ins_pipe(pipe_class_default); |
| %} |
| |
| instruct reduce_add4F(vRegF dst, vRegF src1, vecX src2, vecX tmp) |
| %{ |
| match(Set dst (AddReductionVF src1 src2)); |
| ins_cost(INSN_COST); |
| effect(TEMP tmp, TEMP dst); |
| format %{ "fadds $dst, $src1, $src2\n\t" |
| "ins $tmp, S, $src2, 0, 1\n\t" |
| "fadds $dst, $dst, $tmp\n\t" |
| "ins $tmp, S, $src2, 0, 2\n\t" |
| "fadds $dst, $dst, $tmp\n\t" |
| "ins $tmp, S, $src2, 0, 3\n\t" |
| "fadds $dst, $dst, $tmp\t add reduction4f" |
| %} |
| ins_encode %{ |
| __ fadds(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg)); |
| __ ins(as_FloatRegister($tmp$$reg), __ S, |
| as_FloatRegister($src2$$reg), 0, 1); |
| __ fadds(as_FloatRegister($dst$$reg), |
| as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); |
| __ ins(as_FloatRegister($tmp$$reg), __ S, |
| as_FloatRegister($src2$$reg), 0, 2); |
| __ fadds(as_FloatRegister($dst$$reg), |
| as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); |
| __ ins(as_FloatRegister($tmp$$reg), __ S, |
| as_FloatRegister($src2$$reg), 0, 3); |
| __ fadds(as_FloatRegister($dst$$reg), |
| as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); |
| %} |
| ins_pipe(pipe_class_default); |
| %} |
| |
| instruct reduce_mul2F(vRegF dst, vRegF src1, vecD src2, vecD tmp) |
| %{ |
| match(Set dst (MulReductionVF src1 src2)); |
| ins_cost(INSN_COST); |
| effect(TEMP tmp, TEMP dst); |
| format %{ "fmuls $dst, $src1, $src2\n\t" |
| "ins $tmp, S, $src2, 0, 1\n\t" |
| "fmuls $dst, $dst, $tmp\t add reduction4f" |
| %} |
| ins_encode %{ |
| __ fmuls(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg)); |
| __ ins(as_FloatRegister($tmp$$reg), __ S, |
| as_FloatRegister($src2$$reg), 0, 1); |
| __ fmuls(as_FloatRegister($dst$$reg), |
| as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); |
| %} |
| ins_pipe(pipe_class_default); |
| %} |
| |
| instruct reduce_mul4F(vRegF dst, vRegF src1, vecX src2, vecX tmp) |
| %{ |
| match(Set dst (MulReductionVF src1 src2)); |
| ins_cost(INSN_COST); |
| effect(TEMP tmp, TEMP dst); |
| format %{ "fmuls $dst, $src1, $src2\n\t" |
| "ins $tmp, S, $src2, 0, 1\n\t" |
| "fmuls $dst, $dst, $tmp\n\t" |
| "ins $tmp, S, $src2, 0, 2\n\t" |
| "fmuls $dst, $dst, $tmp\n\t" |
| "ins $tmp, S, $src2, 0, 3\n\t" |
| "fmuls $dst, $dst, $tmp\t add reduction4f" |
| %} |
| ins_encode %{ |
| __ fmuls(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg)); |
| __ ins(as_FloatRegister($tmp$$reg), __ S, |
| as_FloatRegister($src2$$reg), 0, 1); |
| __ fmuls(as_FloatRegister($dst$$reg), |
| as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); |
| __ ins(as_FloatRegister($tmp$$reg), __ S, |
| as_FloatRegister($src2$$reg), 0, 2); |
| __ fmuls(as_FloatRegister($dst$$reg), |
| as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); |
| __ ins(as_FloatRegister($tmp$$reg), __ S, |
| as_FloatRegister($src2$$reg), 0, 3); |
| __ fmuls(as_FloatRegister($dst$$reg), |
| as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); |
| %} |
| ins_pipe(pipe_class_default); |
| %} |
| |
| instruct reduce_add2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) |
| %{ |
| match(Set dst (AddReductionVD src1 src2)); |
| ins_cost(INSN_COST); |
| effect(TEMP tmp, TEMP dst); |
| format %{ "faddd $dst, $src1, $src2\n\t" |
| "ins $tmp, D, $src2, 0, 1\n\t" |
| "faddd $dst, $dst, $tmp\t add reduction2d" |
| %} |
| ins_encode %{ |
| __ faddd(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg)); |
| __ ins(as_FloatRegister($tmp$$reg), __ D, |
| as_FloatRegister($src2$$reg), 0, 1); |
| __ faddd(as_FloatRegister($dst$$reg), |
| as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); |
| %} |
| ins_pipe(pipe_class_default); |
| %} |
| |
| instruct reduce_mul2D(vRegD dst, vRegD src1, vecX src2, vecX tmp) |
| %{ |
| match(Set dst (MulReductionVD src1 src2)); |
| ins_cost(INSN_COST); |
| effect(TEMP tmp, TEMP dst); |
| format %{ "fmuld $dst, $src1, $src2\n\t" |
| "ins $tmp, D, $src2, 0, 1\n\t" |
| "fmuld $dst, $dst, $tmp\t add reduction2d" |
| %} |
| ins_encode %{ |
| __ fmuld(as_FloatRegister($dst$$reg), |
| as_FloatRegister($src1$$reg), as_FloatRegister($src2$$reg)); |
| __ ins(as_FloatRegister($tmp$$reg), __ D, |
| as_FloatRegister($src2$$reg), 0, 1); |
| __ fmuld(as_FloatRegister($dst$$reg), |
| as_FloatRegister($dst$$reg), as_FloatRegister($tmp$$reg)); |
| %} |
| ins_pipe(pipe_class_default); |
| %} |
| |
| // ====================VECTOR ARITHMETIC======================================= |
| |
| // --------------------------------- ADD -------------------------------------- |
| |
| instruct vadd8B(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 4 || |
| n->as_Vector()->length() == 8); |
| match(Set dst (AddVB src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "addv $dst,$src1,$src2\t# vector (8B)" %} |
| ins_encode %{ |
| __ addv(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop64); |
| %} |
| |
| instruct vadd16B(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 16); |
| match(Set dst (AddVB src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "addv $dst,$src1,$src2\t# vector (16B)" %} |
| ins_encode %{ |
| __ addv(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop128); |
| %} |
| |
| instruct vadd4S(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2 || |
| n->as_Vector()->length() == 4); |
| match(Set dst (AddVS src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "addv $dst,$src1,$src2\t# vector (4H)" %} |
| ins_encode %{ |
| __ addv(as_FloatRegister($dst$$reg), __ T4H, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop64); |
| %} |
| |
| instruct vadd8S(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 8); |
| match(Set dst (AddVS src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "addv $dst,$src1,$src2\t# vector (8H)" %} |
| ins_encode %{ |
| __ addv(as_FloatRegister($dst$$reg), __ T8H, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop128); |
| %} |
| |
| instruct vadd2I(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (AddVI src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "addv $dst,$src1,$src2\t# vector (2S)" %} |
| ins_encode %{ |
| __ addv(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop64); |
| %} |
| |
| instruct vadd4I(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (AddVI src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "addv $dst,$src1,$src2\t# vector (4S)" %} |
| ins_encode %{ |
| __ addv(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop128); |
| %} |
| |
| instruct vadd2L(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (AddVL src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "addv $dst,$src1,$src2\t# vector (2L)" %} |
| ins_encode %{ |
| __ addv(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop128); |
| %} |
| |
| instruct vadd2F(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (AddVF src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "fadd $dst,$src1,$src2\t# vector (2S)" %} |
| ins_encode %{ |
| __ fadd(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop_fp64); |
| %} |
| |
| instruct vadd4F(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (AddVF src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "fadd $dst,$src1,$src2\t# vector (4S)" %} |
| ins_encode %{ |
| __ fadd(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop_fp128); |
| %} |
| |
| instruct vadd2D(vecX dst, vecX src1, vecX src2) |
| %{ |
| match(Set dst (AddVD src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "fadd $dst,$src1,$src2\t# vector (2D)" %} |
| ins_encode %{ |
| __ fadd(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop_fp128); |
| %} |
| |
| // --------------------------------- SUB -------------------------------------- |
| |
| instruct vsub8B(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 4 || |
| n->as_Vector()->length() == 8); |
| match(Set dst (SubVB src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "subv $dst,$src1,$src2\t# vector (8B)" %} |
| ins_encode %{ |
| __ subv(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop64); |
| %} |
| |
| instruct vsub16B(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 16); |
| match(Set dst (SubVB src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "subv $dst,$src1,$src2\t# vector (16B)" %} |
| ins_encode %{ |
| __ subv(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop128); |
| %} |
| |
| instruct vsub4S(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2 || |
| n->as_Vector()->length() == 4); |
| match(Set dst (SubVS src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "subv $dst,$src1,$src2\t# vector (4H)" %} |
| ins_encode %{ |
| __ subv(as_FloatRegister($dst$$reg), __ T4H, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop64); |
| %} |
| |
| instruct vsub8S(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 8); |
| match(Set dst (SubVS src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "subv $dst,$src1,$src2\t# vector (8H)" %} |
| ins_encode %{ |
| __ subv(as_FloatRegister($dst$$reg), __ T8H, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop128); |
| %} |
| |
| instruct vsub2I(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (SubVI src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "subv $dst,$src1,$src2\t# vector (2S)" %} |
| ins_encode %{ |
| __ subv(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop64); |
| %} |
| |
| instruct vsub4I(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (SubVI src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "subv $dst,$src1,$src2\t# vector (4S)" %} |
| ins_encode %{ |
| __ subv(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop128); |
| %} |
| |
| instruct vsub2L(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (SubVL src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "subv $dst,$src1,$src2\t# vector (2L)" %} |
| ins_encode %{ |
| __ subv(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop128); |
| %} |
| |
| instruct vsub2F(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (SubVF src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "fsub $dst,$src1,$src2\t# vector (2S)" %} |
| ins_encode %{ |
| __ fsub(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop_fp64); |
| %} |
| |
| instruct vsub4F(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (SubVF src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "fsub $dst,$src1,$src2\t# vector (4S)" %} |
| ins_encode %{ |
| __ fsub(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop_fp128); |
| %} |
| |
| instruct vsub2D(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (SubVD src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "fsub $dst,$src1,$src2\t# vector (2D)" %} |
| ins_encode %{ |
| __ fsub(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vdop_fp128); |
| %} |
| |
| // --------------------------------- MUL -------------------------------------- |
| |
| instruct vmul4S(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2 || |
| n->as_Vector()->length() == 4); |
| match(Set dst (MulVS src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "mulv $dst,$src1,$src2\t# vector (4H)" %} |
| ins_encode %{ |
| __ mulv(as_FloatRegister($dst$$reg), __ T4H, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmul64); |
| %} |
| |
| instruct vmul8S(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 8); |
| match(Set dst (MulVS src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "mulv $dst,$src1,$src2\t# vector (8H)" %} |
| ins_encode %{ |
| __ mulv(as_FloatRegister($dst$$reg), __ T8H, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmul128); |
| %} |
| |
| instruct vmul2I(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (MulVI src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "mulv $dst,$src1,$src2\t# vector (2S)" %} |
| ins_encode %{ |
| __ mulv(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmul64); |
| %} |
| |
| instruct vmul4I(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (MulVI src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "mulv $dst,$src1,$src2\t# vector (4S)" %} |
| ins_encode %{ |
| __ mulv(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmul128); |
| %} |
| |
| instruct vmul2F(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (MulVF src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "fmul $dst,$src1,$src2\t# vector (2S)" %} |
| ins_encode %{ |
| __ fmul(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmuldiv_fp64); |
| %} |
| |
| instruct vmul4F(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (MulVF src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "fmul $dst,$src1,$src2\t# vector (4S)" %} |
| ins_encode %{ |
| __ fmul(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmuldiv_fp128); |
| %} |
| |
| instruct vmul2D(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (MulVD src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "fmul $dst,$src1,$src2\t# vector (2D)" %} |
| ins_encode %{ |
| __ fmul(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmuldiv_fp128); |
| %} |
| |
| // --------------------------------- MLA -------------------------------------- |
| |
| instruct vmla4S(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2 || |
| n->as_Vector()->length() == 4); |
| match(Set dst (AddVS dst (MulVS src1 src2))); |
| ins_cost(INSN_COST); |
| format %{ "mlav $dst,$src1,$src2\t# vector (4H)" %} |
| ins_encode %{ |
| __ mlav(as_FloatRegister($dst$$reg), __ T4H, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmla64); |
| %} |
| |
| instruct vmla8S(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 8); |
| match(Set dst (AddVS dst (MulVS src1 src2))); |
| ins_cost(INSN_COST); |
| format %{ "mlav $dst,$src1,$src2\t# vector (8H)" %} |
| ins_encode %{ |
| __ mlav(as_FloatRegister($dst$$reg), __ T8H, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmla128); |
| %} |
| |
| instruct vmla2I(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (AddVI dst (MulVI src1 src2))); |
| ins_cost(INSN_COST); |
| format %{ "mlav $dst,$src1,$src2\t# vector (2S)" %} |
| ins_encode %{ |
| __ mlav(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmla64); |
| %} |
| |
| instruct vmla4I(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (AddVI dst (MulVI src1 src2))); |
| ins_cost(INSN_COST); |
| format %{ "mlav $dst,$src1,$src2\t# vector (4S)" %} |
| ins_encode %{ |
| __ mlav(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmla128); |
| %} |
| |
| // dst + src1 * src2 |
| instruct vmla2F(vecD dst, vecD src1, vecD src2) %{ |
| predicate(UseFMA && n->as_Vector()->length() == 2); |
| match(Set dst (FmaVF dst (Binary src1 src2))); |
| format %{ "fmla $dst,$src1,$src2\t# vector (2S)" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ fmla(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmuldiv_fp64); |
| %} |
| |
| // dst + src1 * src2 |
| instruct vmla4F(vecX dst, vecX src1, vecX src2) %{ |
| predicate(UseFMA && n->as_Vector()->length() == 4); |
| match(Set dst (FmaVF dst (Binary src1 src2))); |
| format %{ "fmla $dst,$src1,$src2\t# vector (4S)" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ fmla(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmuldiv_fp128); |
| %} |
| |
| // dst + src1 * src2 |
| instruct vmla2D(vecX dst, vecX src1, vecX src2) %{ |
| predicate(UseFMA && n->as_Vector()->length() == 2); |
| match(Set dst (FmaVD dst (Binary src1 src2))); |
| format %{ "fmla $dst,$src1,$src2\t# vector (2D)" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ fmla(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmuldiv_fp128); |
| %} |
| |
| // --------------------------------- MLS -------------------------------------- |
| |
| instruct vmls4S(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2 || |
| n->as_Vector()->length() == 4); |
| match(Set dst (SubVS dst (MulVS src1 src2))); |
| ins_cost(INSN_COST); |
| format %{ "mlsv $dst,$src1,$src2\t# vector (4H)" %} |
| ins_encode %{ |
| __ mlsv(as_FloatRegister($dst$$reg), __ T4H, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmla64); |
| %} |
| |
| instruct vmls8S(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 8); |
| match(Set dst (SubVS dst (MulVS src1 src2))); |
| ins_cost(INSN_COST); |
| format %{ "mlsv $dst,$src1,$src2\t# vector (8H)" %} |
| ins_encode %{ |
| __ mlsv(as_FloatRegister($dst$$reg), __ T8H, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmla128); |
| %} |
| |
| instruct vmls2I(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (SubVI dst (MulVI src1 src2))); |
| ins_cost(INSN_COST); |
| format %{ "mlsv $dst,$src1,$src2\t# vector (2S)" %} |
| ins_encode %{ |
| __ mlsv(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmla64); |
| %} |
| |
| instruct vmls4I(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (SubVI dst (MulVI src1 src2))); |
| ins_cost(INSN_COST); |
| format %{ "mlsv $dst,$src1,$src2\t# vector (4S)" %} |
| ins_encode %{ |
| __ mlsv(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmla128); |
| %} |
| |
| // dst - src1 * src2 |
| instruct vmls2F(vecD dst, vecD src1, vecD src2) %{ |
| predicate(UseFMA && n->as_Vector()->length() == 2); |
| match(Set dst (FmaVF dst (Binary (NegVF src1) src2))); |
| match(Set dst (FmaVF dst (Binary src1 (NegVF src2)))); |
| format %{ "fmls $dst,$src1,$src2\t# vector (2S)" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ fmls(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmuldiv_fp64); |
| %} |
| |
| // dst - src1 * src2 |
| instruct vmls4F(vecX dst, vecX src1, vecX src2) %{ |
| predicate(UseFMA && n->as_Vector()->length() == 4); |
| match(Set dst (FmaVF dst (Binary (NegVF src1) src2))); |
| match(Set dst (FmaVF dst (Binary src1 (NegVF src2)))); |
| format %{ "fmls $dst,$src1,$src2\t# vector (4S)" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ fmls(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmuldiv_fp128); |
| %} |
| |
| // dst - src1 * src2 |
| instruct vmls2D(vecX dst, vecX src1, vecX src2) %{ |
| predicate(UseFMA && n->as_Vector()->length() == 2); |
| match(Set dst (FmaVD dst (Binary (NegVD src1) src2))); |
| match(Set dst (FmaVD dst (Binary src1 (NegVD src2)))); |
| format %{ "fmls $dst,$src1,$src2\t# vector (2D)" %} |
| ins_cost(INSN_COST); |
| ins_encode %{ |
| __ fmls(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmuldiv_fp128); |
| %} |
| |
| // --------------------------------- DIV -------------------------------------- |
| |
| instruct vdiv2F(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (DivVF src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "fdiv $dst,$src1,$src2\t# vector (2S)" %} |
| ins_encode %{ |
| __ fdiv(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmuldiv_fp64); |
| %} |
| |
| instruct vdiv4F(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (DivVF src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "fdiv $dst,$src1,$src2\t# vector (4S)" %} |
| ins_encode %{ |
| __ fdiv(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmuldiv_fp128); |
| %} |
| |
| instruct vdiv2D(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (DivVD src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "fdiv $dst,$src1,$src2\t# vector (2D)" %} |
| ins_encode %{ |
| __ fdiv(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vmuldiv_fp128); |
| %} |
| |
| // --------------------------------- SQRT ------------------------------------- |
| |
| instruct vsqrt2D(vecX dst, vecX src) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (SqrtVD src)); |
| format %{ "fsqrt $dst, $src\t# vector (2D)" %} |
| ins_encode %{ |
| __ fsqrt(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src$$reg)); |
| %} |
| ins_pipe(vsqrt_fp128); |
| %} |
| |
| // --------------------------------- ABS -------------------------------------- |
| |
| instruct vabs2F(vecD dst, vecD src) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (AbsVF src)); |
| ins_cost(INSN_COST * 3); |
| format %{ "fabs $dst,$src\t# vector (2S)" %} |
| ins_encode %{ |
| __ fabs(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src$$reg)); |
| %} |
| ins_pipe(vunop_fp64); |
| %} |
| |
| instruct vabs4F(vecX dst, vecX src) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (AbsVF src)); |
| ins_cost(INSN_COST * 3); |
| format %{ "fabs $dst,$src\t# vector (4S)" %} |
| ins_encode %{ |
| __ fabs(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src$$reg)); |
| %} |
| ins_pipe(vunop_fp128); |
| %} |
| |
| instruct vabs2D(vecX dst, vecX src) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (AbsVD src)); |
| ins_cost(INSN_COST * 3); |
| format %{ "fabs $dst,$src\t# vector (2D)" %} |
| ins_encode %{ |
| __ fabs(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src$$reg)); |
| %} |
| ins_pipe(vunop_fp128); |
| %} |
| |
| // --------------------------------- NEG -------------------------------------- |
| |
| instruct vneg2F(vecD dst, vecD src) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (NegVF src)); |
| ins_cost(INSN_COST * 3); |
| format %{ "fneg $dst,$src\t# vector (2S)" %} |
| ins_encode %{ |
| __ fneg(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src$$reg)); |
| %} |
| ins_pipe(vunop_fp64); |
| %} |
| |
| instruct vneg4F(vecX dst, vecX src) |
| %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (NegVF src)); |
| ins_cost(INSN_COST * 3); |
| format %{ "fneg $dst,$src\t# vector (4S)" %} |
| ins_encode %{ |
| __ fneg(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src$$reg)); |
| %} |
| ins_pipe(vunop_fp128); |
| %} |
| |
| instruct vneg2D(vecX dst, vecX src) |
| %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (NegVD src)); |
| ins_cost(INSN_COST * 3); |
| format %{ "fneg $dst,$src\t# vector (2D)" %} |
| ins_encode %{ |
| __ fneg(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src$$reg)); |
| %} |
| ins_pipe(vunop_fp128); |
| %} |
| |
| // --------------------------------- AND -------------------------------------- |
| |
| instruct vand8B(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length_in_bytes() == 4 || |
| n->as_Vector()->length_in_bytes() == 8); |
| match(Set dst (AndV src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "and $dst,$src1,$src2\t# vector (8B)" %} |
| ins_encode %{ |
| __ andr(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vlogical64); |
| %} |
| |
| instruct vand16B(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length_in_bytes() == 16); |
| match(Set dst (AndV src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "and $dst,$src1,$src2\t# vector (16B)" %} |
| ins_encode %{ |
| __ andr(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vlogical128); |
| %} |
| |
| // --------------------------------- OR --------------------------------------- |
| |
| instruct vor8B(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length_in_bytes() == 4 || |
| n->as_Vector()->length_in_bytes() == 8); |
| match(Set dst (OrV src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "and $dst,$src1,$src2\t# vector (8B)" %} |
| ins_encode %{ |
| __ orr(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vlogical64); |
| %} |
| |
| instruct vor16B(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length_in_bytes() == 16); |
| match(Set dst (OrV src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "orr $dst,$src1,$src2\t# vector (16B)" %} |
| ins_encode %{ |
| __ orr(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vlogical128); |
| %} |
| |
| // --------------------------------- XOR -------------------------------------- |
| |
| instruct vxor8B(vecD dst, vecD src1, vecD src2) |
| %{ |
| predicate(n->as_Vector()->length_in_bytes() == 4 || |
| n->as_Vector()->length_in_bytes() == 8); |
| match(Set dst (XorV src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "xor $dst,$src1,$src2\t# vector (8B)" %} |
| ins_encode %{ |
| __ eor(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vlogical64); |
| %} |
| |
| instruct vxor16B(vecX dst, vecX src1, vecX src2) |
| %{ |
| predicate(n->as_Vector()->length_in_bytes() == 16); |
| match(Set dst (XorV src1 src2)); |
| ins_cost(INSN_COST); |
| format %{ "xor $dst,$src1,$src2\t# vector (16B)" %} |
| ins_encode %{ |
| __ eor(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src1$$reg), |
| as_FloatRegister($src2$$reg)); |
| %} |
| ins_pipe(vlogical128); |
| %} |
| |
| // ------------------------------ Shift --------------------------------------- |
| |
| instruct vshiftcntL(vecX dst, iRegIorL2I cnt) %{ |
| match(Set dst (LShiftCntV cnt)); |
| format %{ "dup $dst, $cnt\t# shift count (vecX)" %} |
| ins_encode %{ |
| __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg)); |
| %} |
| ins_pipe(vdup_reg_reg128); |
| %} |
| |
| // Right shifts on aarch64 SIMD are implemented as left shifts by a negative shift amount |
| instruct vshiftcntR(vecX dst, iRegIorL2I cnt) %{ |
| match(Set dst (RShiftCntV cnt)); |
| format %{ "dup $dst, $cnt\t# shift count (vecX)\n\tneg $dst, $dst\t T16B" %} |
| ins_encode %{ |
| __ dup(as_FloatRegister($dst$$reg), __ T16B, as_Register($cnt$$reg)); |
| __ negr(as_FloatRegister($dst$$reg), __ T16B, as_FloatRegister($dst$$reg)); |
| %} |
| ins_pipe(vdup_reg_reg128); |
| %} |
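| |
| // For example, an unsigned right shift of each byte lane by 3 is |
| // performed roughly as |
| // |
| //   dup  v0.16b, w0        // w0 == 3 |
| //   neg  v0.16b, v0.16b    // every lane now holds -3 |
| //   ushl vd.8b, vs.8b, v0.8b |
| // |
| // since sshl/ushl shift left for positive lane counts and right for |
| // negative ones. |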
| |
| instruct vsll8B(vecD dst, vecD src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 4 || |
| n->as_Vector()->length() == 8); |
| match(Set dst (LShiftVB src shift)); |
| match(Set dst (RShiftVB src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshl $dst,$src,$shift\t# vector (8B)" %} |
| ins_encode %{ |
| __ sshl(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift64); |
| %} |
| |
| instruct vsll16B(vecX dst, vecX src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 16); |
| match(Set dst (LShiftVB src shift)); |
| match(Set dst (RShiftVB src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshl $dst,$src,$shift\t# vector (16B)" %} |
| ins_encode %{ |
| __ sshl(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift128); |
| %} |
| |
| instruct vsrl8B(vecD dst, vecD src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 4 || |
| n->as_Vector()->length() == 8); |
| match(Set dst (URShiftVB src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushl $dst,$src,$shift\t# vector (8B)" %} |
| ins_encode %{ |
| __ ushl(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift64); |
| %} |
| |
| instruct vsrl16B(vecX dst, vecX src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 16); |
| match(Set dst (URShiftVB src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushl $dst,$src,$shift\t# vector (16B)" %} |
| ins_encode %{ |
| __ ushl(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift128); |
| %} |
| |
| instruct vsll8B_imm(vecD dst, vecD src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 4 || |
| n->as_Vector()->length() == 8); |
| match(Set dst (LShiftVB src shift)); |
| ins_cost(INSN_COST); |
| format %{ "shl $dst, $src, $shift\t# vector (8B)" %} |
| ins_encode %{ |
| int sh = (int)$shift$$constant & 31; |
| if (sh >= 8) { |
| __ eor(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($src$$reg)); |
| } else { |
| __ shl(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src$$reg), sh); |
| } |
| %} |
| ins_pipe(vshift64_imm); |
| %} |
| |
| instruct vsll16B_imm(vecX dst, vecX src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 16); |
| match(Set dst (LShiftVB src shift)); |
| ins_cost(INSN_COST); |
| format %{ "shl $dst, $src, $shift\t# vector (16B)" %} |
| ins_encode %{ |
| int sh = (int)$shift$$constant & 31; |
| if (sh >= 8) { |
| __ eor(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($src$$reg)); |
| } else { |
| __ shl(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src$$reg), sh); |
| } |
| %} |
| ins_pipe(vshift128_imm); |
| %} |
| |
| instruct vsra8B_imm(vecD dst, vecD src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 4 || |
| n->as_Vector()->length() == 8); |
| match(Set dst (RShiftVB src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshr $dst, $src, $shift\t# vector (8B)" %} |
| ins_encode %{ |
| int sh = (int)$shift$$constant & 31; |
| if (sh >= 8) sh = 7; |
| sh = -sh & 7; |
| __ sshr(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src$$reg), sh); |
| %} |
| ins_pipe(vshift64_imm); |
| %} |
| |
| instruct vsra16B_imm(vecX dst, vecX src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 16); |
| match(Set dst (RShiftVB src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshr $dst, $src, $shift\t# vector (16B)" %} |
| ins_encode %{ |
| int sh = (int)$shift$$constant & 31; |
| if (sh >= 8) sh = 7; |
| sh = -sh & 7; |
| __ sshr(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src$$reg), sh); |
| %} |
| ins_pipe(vshift128_imm); |
| %} |
| |
| instruct vsrl8B_imm(vecD dst, vecD src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 4 || |
| n->as_Vector()->length() == 8); |
| match(Set dst (URShiftVB src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushr $dst, $src, $shift\t# vector (8B)" %} |
| ins_encode %{ |
| int sh = (int)$shift$$constant & 31; |
| if (sh >= 8) { |
| __ eor(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($src$$reg)); |
| } else { |
| __ ushr(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src$$reg), -sh & 7); |
| } |
| %} |
| ins_pipe(vshift64_imm); |
| %} |
| |
| instruct vsrl16B_imm(vecX dst, vecX src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 16); |
| match(Set dst (URShiftVB src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushr $dst, $src, $shift\t# vector (16B)" %} |
| ins_encode %{ |
| int sh = (int)$shift$$constant & 31; |
| if (sh >= 8) { |
| __ eor(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($src$$reg)); |
| } else { |
| __ ushr(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src$$reg), -sh & 7); |
| } |
| %} |
| ins_pipe(vshift128_imm); |
| %} |
| |
| instruct vsll4S(vecD dst, vecD src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 2 || |
| n->as_Vector()->length() == 4); |
| match(Set dst (LShiftVS src shift)); |
| match(Set dst (RShiftVS src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshl $dst,$src,$shift\t# vector (4H)" %} |
| ins_encode %{ |
| __ sshl(as_FloatRegister($dst$$reg), __ T4H, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift64); |
| %} |
| |
| instruct vsll8S(vecX dst, vecX src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 8); |
| match(Set dst (LShiftVS src shift)); |
| match(Set dst (RShiftVS src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshl $dst,$src,$shift\t# vector (8H)" %} |
| ins_encode %{ |
| __ sshl(as_FloatRegister($dst$$reg), __ T8H, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift128); |
| %} |
| |
| instruct vsrl4S(vecD dst, vecD src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 2 || |
| n->as_Vector()->length() == 4); |
| match(Set dst (URShiftVS src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushl $dst,$src,$shift\t# vector (4H)" %} |
| ins_encode %{ |
| __ ushl(as_FloatRegister($dst$$reg), __ T4H, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift64); |
| %} |
| |
| instruct vsrl8S(vecX dst, vecX src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 8); |
| match(Set dst (URShiftVS src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushl $dst,$src,$shift\t# vector (8H)" %} |
| ins_encode %{ |
| __ ushl(as_FloatRegister($dst$$reg), __ T8H, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift128); |
| %} |
| |
| instruct vsll4S_imm(vecD dst, vecD src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 2 || |
| n->as_Vector()->length() == 4); |
| match(Set dst (LShiftVS src shift)); |
| ins_cost(INSN_COST); |
| format %{ "shl $dst, $src, $shift\t# vector (4H)" %} |
| ins_encode %{ |
| int sh = (int)$shift$$constant & 31; |
| if (sh >= 16) { |
| __ eor(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($src$$reg)); |
| } else { |
| __ shl(as_FloatRegister($dst$$reg), __ T4H, |
| as_FloatRegister($src$$reg), sh); |
| } |
| %} |
| ins_pipe(vshift64_imm); |
| %} |
| |
| instruct vsll8S_imm(vecX dst, vecX src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 8); |
| match(Set dst (LShiftVS src shift)); |
| ins_cost(INSN_COST); |
| format %{ "shl $dst, $src, $shift\t# vector (8H)" %} |
| ins_encode %{ |
| int sh = (int)$shift$$constant & 31; |
| if (sh >= 16) { |
| __ eor(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($src$$reg)); |
| } else { |
| __ shl(as_FloatRegister($dst$$reg), __ T8H, |
| as_FloatRegister($src$$reg), sh); |
| } |
| %} |
| ins_pipe(vshift128_imm); |
| %} |
| |
| instruct vsra4S_imm(vecD dst, vecD src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 2 || |
| n->as_Vector()->length() == 4); |
| match(Set dst (RShiftVS src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshr $dst, $src, $shift\t# vector (4H)" %} |
| ins_encode %{ |
| int sh = (int)$shift$$constant & 31; |
| if (sh >= 16) sh = 15; |
| sh = -sh & 15; |
| __ sshr(as_FloatRegister($dst$$reg), __ T4H, |
| as_FloatRegister($src$$reg), sh); |
| %} |
| ins_pipe(vshift64_imm); |
| %} |
| |
| instruct vsra8S_imm(vecX dst, vecX src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 8); |
| match(Set dst (RShiftVS src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshr $dst, $src, $shift\t# vector (8H)" %} |
| ins_encode %{ |
| int sh = (int)$shift$$constant & 31; |
| if (sh >= 16) sh = 15; |
| sh = -sh & 15; |
| __ sshr(as_FloatRegister($dst$$reg), __ T8H, |
| as_FloatRegister($src$$reg), sh); |
| %} |
| ins_pipe(vshift128_imm); |
| %} |
| |
| instruct vsrl4S_imm(vecD dst, vecD src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 2 || |
| n->as_Vector()->length() == 4); |
| match(Set dst (URShiftVS src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushr $dst, $src, $shift\t# vector (4H)" %} |
| ins_encode %{ |
| int sh = (int)$shift$$constant & 31; |
| if (sh >= 16) { |
| __ eor(as_FloatRegister($dst$$reg), __ T8B, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($src$$reg)); |
| } else { |
| __ ushr(as_FloatRegister($dst$$reg), __ T4H, |
| as_FloatRegister($src$$reg), -sh & 15); |
| } |
| %} |
| ins_pipe(vshift64_imm); |
| %} |
| |
| instruct vsrl8S_imm(vecX dst, vecX src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 8); |
| match(Set dst (URShiftVS src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushr $dst, $src, $shift\t# vector (8H)" %} |
| ins_encode %{ |
| int sh = (int)$shift$$constant & 31; |
| if (sh >= 16) { |
| __ eor(as_FloatRegister($dst$$reg), __ T16B, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($src$$reg)); |
| } else { |
| __ ushr(as_FloatRegister($dst$$reg), __ T8H, |
| as_FloatRegister($src$$reg), -sh & 15); |
| } |
| %} |
| ins_pipe(vshift128_imm); |
| %} |
| |
| instruct vsll2I(vecD dst, vecD src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (LShiftVI src shift)); |
| match(Set dst (RShiftVI src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshl $dst,$src,$shift\t# vector (2S)" %} |
| ins_encode %{ |
| __ sshl(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift64); |
| %} |
| |
| instruct vsll4I(vecX dst, vecX src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (LShiftVI src shift)); |
| match(Set dst (RShiftVI src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshl $dst,$src,$shift\t# vector (4S)" %} |
| ins_encode %{ |
| __ sshl(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift128); |
| %} |
| |
| instruct vsrl2I(vecD dst, vecD src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (URShiftVI src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushl $dst,$src,$shift\t# vector (2S)" %} |
| ins_encode %{ |
| __ ushl(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift64); |
| %} |
| |
| instruct vsrl4I(vecX dst, vecX src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (URShiftVI src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushl $dst,$src,$shift\t# vector (4S)" %} |
| ins_encode %{ |
| __ ushl(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift128); |
| %} |
| |
| instruct vsll2I_imm(vecD dst, vecD src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (LShiftVI src shift)); |
| ins_cost(INSN_COST); |
| format %{ "shl $dst, $src, $shift\t# vector (2S)" %} |
| ins_encode %{ |
| __ shl(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src$$reg), |
| (int)$shift$$constant & 31); |
| %} |
| ins_pipe(vshift64_imm); |
| %} |
| |
| instruct vsll4I_imm(vecX dst, vecX src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (LShiftVI src shift)); |
| ins_cost(INSN_COST); |
| format %{ "shl $dst, $src, $shift\t# vector (4S)" %} |
| ins_encode %{ |
| __ shl(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src$$reg), |
| (int)$shift$$constant & 31); |
| %} |
| ins_pipe(vshift128_imm); |
| %} |
| |
| instruct vsra2I_imm(vecD dst, vecD src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (RShiftVI src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshr $dst, $src, $shift\t# vector (2S)" %} |
| ins_encode %{ |
| __ sshr(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src$$reg), |
| -(int)$shift$$constant & 31); |
| %} |
| ins_pipe(vshift64_imm); |
| %} |
| |
| instruct vsra4I_imm(vecX dst, vecX src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (RShiftVI src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshr $dst, $src, $shift\t# vector (4S)" %} |
| ins_encode %{ |
| __ sshr(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src$$reg), |
| -(int)$shift$$constant & 31); |
| %} |
| ins_pipe(vshift128_imm); |
| %} |
| |
| instruct vsrl2I_imm(vecD dst, vecD src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (URShiftVI src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushr $dst, $src, $shift\t# vector (2S)" %} |
| ins_encode %{ |
| __ ushr(as_FloatRegister($dst$$reg), __ T2S, |
| as_FloatRegister($src$$reg), |
| -(int)$shift$$constant & 31); |
| %} |
| ins_pipe(vshift64_imm); |
| %} |
| |
| instruct vsrl4I_imm(vecX dst, vecX src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 4); |
| match(Set dst (URShiftVI src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushr $dst, $src, $shift\t# vector (4S)" %} |
| ins_encode %{ |
| __ ushr(as_FloatRegister($dst$$reg), __ T4S, |
| as_FloatRegister($src$$reg), |
| -(int)$shift$$constant & 31); |
| %} |
| ins_pipe(vshift128_imm); |
| %} |
| |
| instruct vsll2L(vecX dst, vecX src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (LShiftVL src shift)); |
| match(Set dst (RShiftVL src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshl $dst,$src,$shift\t# vector (2D)" %} |
| ins_encode %{ |
| __ sshl(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift128); |
| %} |
| |
| instruct vsrl2L(vecX dst, vecX src, vecX shift) %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (URShiftVL src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushl $dst,$src,$shift\t# vector (2D)" %} |
| ins_encode %{ |
| __ ushl(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src$$reg), |
| as_FloatRegister($shift$$reg)); |
| %} |
| ins_pipe(vshift128); |
| %} |
| |
| instruct vsll2L_imm(vecX dst, vecX src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (LShiftVL src shift)); |
| ins_cost(INSN_COST); |
| format %{ "shl $dst, $src, $shift\t# vector (2D)" %} |
| ins_encode %{ |
| __ shl(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src$$reg), |
| (int)$shift$$constant & 63); |
| %} |
| ins_pipe(vshift128_imm); |
| %} |
| |
| instruct vsra2L_imm(vecX dst, vecX src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (RShiftVL src shift)); |
| ins_cost(INSN_COST); |
| format %{ "sshr $dst, $src, $shift\t# vector (2D)" %} |
| ins_encode %{ |
| __ sshr(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src$$reg), |
| -(int)$shift$$constant & 63); |
| %} |
| ins_pipe(vshift128_imm); |
| %} |
| |
| instruct vsrl2L_imm(vecX dst, vecX src, immI shift) %{ |
| predicate(n->as_Vector()->length() == 2); |
| match(Set dst (URShiftVL src shift)); |
| ins_cost(INSN_COST); |
| format %{ "ushr $dst, $src, $shift\t# vector (2D)" %} |
| ins_encode %{ |
| __ ushr(as_FloatRegister($dst$$reg), __ T2D, |
| as_FloatRegister($src$$reg), |
| -(int)$shift$$constant & 63); |
| %} |
| ins_pipe(vshift128_imm); |
| %} |
| |
| //----------PEEPHOLE RULES----------------------------------------------------- |
// These must follow all instruction definitions, as they use the names
// defined in those definitions.
| // |
| // peepmatch ( root_instr_name [preceding_instruction]* ); |
| // |
| // peepconstraint %{ |
| // (instruction_number.operand_name relational_op instruction_number.operand_name |
| // [, ...] ); |
| // // instruction numbers are zero-based using left to right order in peepmatch |
| // |
| // peepreplace ( instr_name ( [instruction_number.operand_name]* ) ); |
| // // provide an instruction_number.operand_name for each operand that appears |
| // // in the replacement instruction's match rule |
| // |
| // ---------VM FLAGS--------------------------------------------------------- |
| // |
| // All peephole optimizations can be turned off using -XX:-OptoPeephole |
| // |
| // Each peephole rule is given an identifying number starting with zero and |
| // increasing by one in the order seen by the parser. An individual peephole |
| // can be enabled, and all others disabled, by using -XX:OptoPeepholeAt=# |
| // on the command-line. |
| // |
| // ---------CURRENT LIMITATIONS---------------------------------------------- |
| // |
| // Only match adjacent instructions in same basic block |
| // Only equality constraints |
| // Only constraints between operands, not (0.dest_reg == RAX_enc) |
| // Only one replacement instruction |
| // |
| // ---------EXAMPLE---------------------------------------------------------- |
| // |
| // // pertinent parts of existing instructions in architecture description |
| // instruct movI(iRegINoSp dst, iRegI src) |
| // %{ |
| // match(Set dst (CopyI src)); |
| // %} |
| // |
| // instruct incI_iReg(iRegINoSp dst, immI1 src, rFlagsReg cr) |
| // %{ |
| // match(Set dst (AddI dst src)); |
| // effect(KILL cr); |
| // %} |
| // |
| // // Change (inc mov) to lea |
| // peephole %{ |
// // increment preceded by register-register move
| // peepmatch ( incI_iReg movI ); |
| // // require that the destination register of the increment |
| // // match the destination register of the move |
| // peepconstraint ( 0.dst == 1.dst ); |
| // // construct a replacement instruction that sets |
| // // the destination to ( move's source register + one ) |
| // peepreplace ( leaI_iReg_immI( 0.dst 1.src 0.src ) ); |
| // %} |
| // |
| |
// This implementation no longer uses movX instructions, since the
// machine-independent system no longer uses CopyX nodes.
| // |
| // peephole |
| // %{ |
| // peepmatch (incI_iReg movI); |
| // peepconstraint (0.dst == 1.dst); |
| // peepreplace (leaI_iReg_immI(0.dst 1.src 0.src)); |
| // %} |
| |
| // peephole |
| // %{ |
| // peepmatch (decI_iReg movI); |
| // peepconstraint (0.dst == 1.dst); |
| // peepreplace (leaI_iReg_immI(0.dst 1.src 0.src)); |
| // %} |
| |
| // peephole |
| // %{ |
| // peepmatch (addI_iReg_imm movI); |
| // peepconstraint (0.dst == 1.dst); |
| // peepreplace (leaI_iReg_immI(0.dst 1.src 0.src)); |
| // %} |
| |
| // peephole |
| // %{ |
| // peepmatch (incL_iReg movL); |
| // peepconstraint (0.dst == 1.dst); |
| // peepreplace (leaL_iReg_immL(0.dst 1.src 0.src)); |
| // %} |
| |
| // peephole |
| // %{ |
| // peepmatch (decL_iReg movL); |
| // peepconstraint (0.dst == 1.dst); |
| // peepreplace (leaL_iReg_immL(0.dst 1.src 0.src)); |
| // %} |
| |
| // peephole |
| // %{ |
| // peepmatch (addL_iReg_imm movL); |
| // peepconstraint (0.dst == 1.dst); |
| // peepreplace (leaL_iReg_immL(0.dst 1.src 0.src)); |
| // %} |
| |
| // peephole |
| // %{ |
| // peepmatch (addP_iReg_imm movP); |
| // peepconstraint (0.dst == 1.dst); |
| // peepreplace (leaP_iReg_imm(0.dst 1.src 0.src)); |
| // %} |
| |
| // // Change load of spilled value to only a spill |
| // instruct storeI(memory mem, iRegI src) |
| // %{ |
| // match(Set mem (StoreI mem src)); |
| // %} |
| // |
| // instruct loadI(iRegINoSp dst, memory mem) |
| // %{ |
| // match(Set dst (LoadI mem)); |
| // %} |
| // |
| |
| //----------SMARTSPILL RULES--------------------------------------------------- |
// These must follow all instruction definitions, as they use the names
// defined in those definitions.
| |
| // Local Variables: |
| // mode: c++ |
| // End: |