AVX512: vpextrb/w/d/q and vpinsrb/w/d/q implementation.
These instructions don't have intrinsics; they are reached through the generic
extractelement/insertelement lowering.
Added tests for lowering and encoding.

Differential Revision: http://reviews.llvm.org/D12317

llvm-svn: 249688
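
Since there are no intrinsics, instruction selection reaches vpextrb/w/d/q and
vpinsrb/w/d/q through the generic extractelement and insertelement nodes. A
minimal sketch (illustrative only, assuming llc -mcpu=skx; the @demo function
is not part of this patch):

  define <4 x i32> @demo(<4 x i32> %v, i32 %s, i32* %p) {
    %e = extractelement <4 x i32> %v, i32 2          ; selected as vpextrd $2
    store i32 %e, i32* %p
    %x = insertelement <4 x i32> %v, i32 %s, i32 1   ; selected as vpinsrd $1
    ret <4 x i32> %x
  }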
diff --git a/llvm/test/CodeGen/X86/avx-isa-check.ll b/llvm/test/CodeGen/X86/avx-isa-check.ll
index 4d8db7d..071891c 100644
--- a/llvm/test/CodeGen/X86/avx-isa-check.ll
+++ b/llvm/test/CodeGen/X86/avx-isa-check.ll
@@ -267,3 +267,59 @@
   %shuffle = shufflevector <16 x i16> zeroinitializer, <16 x i16> %a, <16 x i32> <i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 16, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 24>
   ret <16 x i16> %shuffle
 }
+
+define i64 @extract_v2i64(<2 x i64> %x, i64* %dst) {
+  %r1 = extractelement <2 x i64> %x, i32 0
+  %r2 = extractelement <2 x i64> %x, i32 1
+  store i64 %r2, i64* %dst, align 1
+  ret i64 %r1
+}
+
+define i32 @extract_v4i32(<4 x i32> %x, i32* %dst) {
+  %r1 = extractelement <4 x i32> %x, i32 1
+  %r2 = extractelement <4 x i32> %x, i32 3
+  store i32 %r2, i32* %dst, align 1
+  ret i32 %r1
+}
+
+define i16 @extract_v8i16(<8 x i16> %x, i16* %dst) {
+  %r1 = extractelement <8 x i16> %x, i32 1
+  %r2 = extractelement <8 x i16> %x, i32 3
+  store i16 %r2, i16* %dst, align 1
+  ret i16 %r1
+}
+
+define i8 @extract_v16i8(<16 x i8> %x, i8* %dst) {
+  %r1 = extractelement <16 x i8> %x, i32 1
+  %r2 = extractelement <16 x i8> %x, i32 3
+  store i8 %r2, i8* %dst, align 1
+  ret i8 %r1
+}
+
+define <2 x i64> @insert_v2i64(<2 x i64> %x, i64 %y, i64* %ptr) {
+  %val = load i64, i64* %ptr
+  %r1 = insertelement <2 x i64> %x, i64 %val, i32 1
+  %r2 = insertelement <2 x i64> %r1, i64 %y, i32 3
+  ret <2 x i64> %r2
+}
+
+define <4 x i32> @insert_v4i32(<4 x i32> %x, i32 %y, i32* %ptr) {
+  %val = load i32, i32* %ptr
+  %r1 = insertelement <4 x i32> %x, i32 %val, i32 1
+  %r2 = insertelement <4 x i32> %r1, i32 %y, i32 3
+  ret <4 x i32> %r2
+}
+
+define <8 x i16> @insert_v8i16(<8 x i16> %x, i16 %y, i16* %ptr) {
+  %val = load i16, i16* %ptr
+  %r1 = insertelement <8 x i16> %x, i16 %val, i32 1
+  %r2 = insertelement <8 x i16> %r1, i16 %y, i32 5
+  ret <8 x i16> %r2
+}
+
+define <16 x i8> @insert_v16i8(<16 x i8> %x, i8 %y, i8* %ptr) {
+  %val = load i8, i8* %ptr
+  %r1 = insertelement <16 x i8> %x, i8 %val, i32 3
+  %r2 = insertelement <16 x i8> %r1, i8 %y, i32 10
+  ret <16 x i8> %r2
+}
diff --git a/llvm/test/CodeGen/X86/avx512-extract-subvector.ll b/llvm/test/CodeGen/X86/avx512-extract-subvector.ll
new file mode 100644
index 0000000..81a3bd8
--- /dev/null
+++ b/llvm/test/CodeGen/X86/avx512-extract-subvector.ll
@@ -0,0 +1,53 @@
+; RUN: llc < %s -mtriple=x86_64-apple-darwin -mcpu=skx | FileCheck --check-prefix=SKX %s
+
+define <8 x i16> @extract_subvector128_v32i16(<32 x i16> %x) nounwind {
+; SKX-LABEL: extract_subvector128_v32i16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
+; SKX-NEXT:    retq
+  %r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23>
+  ret <8 x i16> %r1
+}
+
+define <8 x i16> @extract_subvector128_v32i16_first_element(<32 x i16> %x) nounwind {
+; SKX-LABEL: extract_subvector128_v32i16_first_element:
+; SKX:       ## BB#0:
+; SKX-NEXT:    retq
+  %r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>
+  ret <8 x i16> %r1
+}
+
+define <16 x i8> @extract_subvector128_v64i8(<64 x i8> %x) nounwind {
+; SKX-LABEL: extract_subvector128_v64i8:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vextracti32x4 $2, %zmm0, %xmm0
+; SKX-NEXT:    retq
+  %r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47>
+  ret <16 x i8> %r1
+}
+
+define <16 x i8> @extract_subvector128_v64i8_first_element(<64 x i8> %x) nounwind {
+; SKX-LABEL: extract_subvector128_v64i8_first_element:
+; SKX:       ## BB#0:
+; SKX-NEXT:    retq
+  %r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15>
+  ret <16 x i8> %r1
+}
+
+define <16 x i16> @extract_subvector256_v32i16(<32 x i16> %x) nounwind {
+; SKX-LABEL: extract_subvector256_v32i16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; SKX-NEXT:    retq
+  %r1 = shufflevector <32 x i16> %x, <32 x i16> undef, <16 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31>
+  ret <16 x i16> %r1
+}
+
+define <32 x i8> @extract_subvector256_v64i8(<64 x i8> %x) nounwind {
+; SKX-LABEL: extract_subvector256_v64i8:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vextracti64x4 $1, %zmm0, %ymm0
+; SKX-NEXT:    retq
+  %r1 = shufflevector <64 x i8> %x, <64 x i8> undef, <32 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63>
+  ret <32 x i8> %r1
+}
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index 1930384..41ec62c 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -231,6 +231,380 @@
   ret i8 %x2
 }
 
+define i64 @extract_v8i64(<8 x i64> %x, i64* %dst) {
+; SKX-LABEL: extract_v8i64:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpextrq $1, %xmm0, %rax
+; SKX-NEXT:    vextracti64x2 $1, %zmm0, %xmm0
+; SKX-NEXT:    vpextrq $1, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <8 x i64> %x, i32 1
+  %r2 = extractelement <8 x i64> %x, i32 3
+  store i64 %r2, i64* %dst, align 1
+  ret i64 %r1
+}
+
+define i64 @extract_v4i64(<4 x i64> %x, i64* %dst) {
+; SKX-LABEL: extract_v4i64:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpextrq $1, %xmm0, %rax
+; SKX-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; SKX-NEXT:    vpextrq $1, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <4 x i64> %x, i32 1
+  %r2 = extractelement <4 x i64> %x, i32 3
+  store i64 %r2, i64* %dst, align 1
+  ret i64 %r1
+}
+
+define i64 @extract_v2i64(<2 x i64> %x, i64* %dst) {
+; SKX-LABEL: extract_v2i64:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vmovq %xmm0, %rax
+; SKX-NEXT:    vpextrq $1, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <2 x i64> %x, i32 0
+  %r2 = extractelement <2 x i64> %x, i32 1
+  store i64 %r2, i64* %dst, align 1
+  ret i64 %r1
+}
+
+define i32 @extract_v16i32(<16 x i32> %x, i32* %dst) {
+; SKX-LABEL: extract_v16i32:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpextrd $1, %xmm0, %eax
+; SKX-NEXT:    vextracti32x4 $1, %zmm0, %xmm0
+; SKX-NEXT:    vpextrd $1, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <16 x i32> %x, i32 1
+  %r2 = extractelement <16 x i32> %x, i32 5
+  store i32 %r2, i32* %dst, align 1
+  ret i32 %r1
+}
+
+define i32 @extract_v8i32(<8 x i32> %x, i32* %dst) {
+; SKX-LABEL: extract_v8i32:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpextrd $1, %xmm0, %eax
+; SKX-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; SKX-NEXT:    vpextrd $1, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <8 x i32> %x, i32 1
+  %r2 = extractelement <8 x i32> %x, i32 5
+  store i32 %r2, i32* %dst, align 1
+  ret i32 %r1
+}
+
+define i32 @extract_v4i32(<4 x i32> %x, i32* %dst) {
+; SKX-LABEL: extract_v4i32:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpextrd $1, %xmm0, %eax
+; SKX-NEXT:    vpextrd $3, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <4 x i32> %x, i32 1
+  %r2 = extractelement <4 x i32> %x, i32 3
+  store i32 %r2, i32* %dst, align 1
+  ret i32 %r1
+}
+
+define i16 @extract_v32i16(<32 x i16> %x, i16* %dst) {
+; SKX-LABEL: extract_v32i16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpextrw $1, %xmm0, %eax
+; SKX-NEXT:    vextracti32x4 $1, %zmm0, %xmm0
+; SKX-NEXT:    vpextrw $1, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <32 x i16> %x, i32 1
+  %r2 = extractelement <32 x i16> %x, i32 9
+  store i16 %r2, i16* %dst, align 1
+  ret i16 %r1
+}
+
+define i16 @extract_v16i16(<16 x i16> %x, i16* %dst) {
+; SKX-LABEL: extract_v16i16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpextrw $1, %xmm0, %eax
+; SKX-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; SKX-NEXT:    vpextrw $1, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <16 x i16> %x, i32 1
+  %r2 = extractelement <16 x i16> %x, i32 9
+  store i16 %r2, i16* %dst, align 1
+  ret i16 %r1
+}
+
+define i16 @extract_v8i16(<8 x i16> %x, i16* %dst) {
+; SKX-LABEL: extract_v8i16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpextrw $1, %xmm0, %eax
+; SKX-NEXT:    vpextrw $3, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <8 x i16> %x, i32 1
+  %r2 = extractelement <8 x i16> %x, i32 3
+  store i16 %r2, i16* %dst, align 1
+  ret i16 %r1
+}
+
+define i8 @extract_v64i8(<64 x i8> %x, i8* %dst) {
+; SKX-LABEL: extract_v64i8:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpextrb $1, %xmm0, %eax
+; SKX-NEXT:    vextracti32x4 $1, %zmm0, %xmm0
+; SKX-NEXT:    vpextrb $1, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <64 x i8> %x, i32 1
+  %r2 = extractelement <64 x i8> %x, i32 17
+  store i8 %r2, i8* %dst, align 1
+  ret i8 %r1
+}
+
+define i8 @extract_v32i8(<32 x i8> %x, i8* %dst) {
+; SKX-LABEL: extract_v32i8:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpextrb $1, %xmm0, %eax
+; SKX-NEXT:    vextracti128 $1, %ymm0, %xmm0
+; SKX-NEXT:    vpextrb $1, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <32 x i8> %x, i32 1
+  %r2 = extractelement <32 x i8> %x, i32 17
+  store i8 %r2, i8* %dst, align 1
+  ret i8 %r1
+}
+
+define i8 @extract_v16i8(<16 x i8> %x, i8* %dst) {
+; SKX-LABEL: extract_v16i8:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpextrb $1, %xmm0, %eax
+; SKX-NEXT:    vpextrb $3, %xmm0, (%rdi)
+; SKX-NEXT:    retq
+  %r1 = extractelement <16 x i8> %x, i32 1
+  %r2 = extractelement <16 x i8> %x, i32 3
+  store i8 %r2, i8* %dst, align 1
+  ret i8 %r1
+}
+
+define <8 x i64> @insert_v8i64(<8 x i64> %x, i64 %y, i64* %ptr) {
+; SKX-LABEL: insert_v8i64:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpinsrq $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT:    vinserti64x2 $0, %xmm1, %zmm0, %zmm0
+; SKX-NEXT:    vextracti64x2 $1, %zmm0, %xmm1
+; SKX-NEXT:    vpinsrq $1, %rdi, %xmm1, %xmm1
+; SKX-NEXT:    vinserti64x2 $1, %xmm1, %zmm0, %zmm0
+; SKX-NEXT:    retq
+  %val = load i64, i64* %ptr
+  %r1 = insertelement <8 x i64> %x, i64 %val, i32 1
+  %r2 = insertelement <8 x i64> %r1, i64 %y, i32 3
+  ret <8 x i64> %r2
+}
+
+define <4 x i64> @insert_v4i64(<4 x i64> %x, i64 %y, i64* %ptr) {
+; SKX-LABEL: insert_v4i64:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpinsrq $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; SKX-NEXT:    vpinsrq $1, %rdi, %xmm1, %xmm1
+; SKX-NEXT:    vinserti64x2 $1, %xmm1, %ymm0, %ymm0
+; SKX-NEXT:    retq
+  %val = load i64, i64* %ptr
+  %r1 = insertelement <4 x i64> %x, i64 %val, i32 1
+  %r2 = insertelement <4 x i64> %r1, i64 %y, i32 3
+  ret <4 x i64> %r2
+}
+
+define <2 x i64> @insert_v2i64(<2 x i64> %x, i64 %y, i64* %ptr) {
+; SKX-LABEL: insert_v2i64:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpinsrq $1, (%rsi), %xmm0, %xmm0
+; SKX-NEXT:    vpinsrq $3, %rdi, %xmm0, %xmm0
+; SKX-NEXT:    retq
+  %val = load i64, i64* %ptr
+  %r1 = insertelement <2 x i64> %x, i64 %val, i32 1
+  %r2 = insertelement <2 x i64> %r1, i64 %y, i32 3
+  ret <2 x i64> %r2
+}
+
+define <16 x i32> @insert_v16i32(<16 x i32> %x, i32 %y, i32* %ptr) {
+; SKX-LABEL: insert_v16i32:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpinsrd $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; SKX-NEXT:    vextracti32x4 $1, %zmm0, %xmm1
+; SKX-NEXT:    vpinsrd $1, %edi, %xmm1, %xmm1
+; SKX-NEXT:    vinserti32x4 $1, %xmm1, %zmm0, %zmm0
+; SKX-NEXT:    retq
+  %val = load i32, i32* %ptr
+  %r1 = insertelement <16 x i32> %x, i32 %val, i32 1
+  %r2 = insertelement <16 x i32> %r1, i32 %y, i32 5
+  ret <16 x i32> %r2
+}
+
+define <8 x i32> @insert_v8i32(<8 x i32> %x, i32 %y, i32* %ptr) {
+; KNL-LABEL: insert_v8i32:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpinsrd $1, (%rsi), %xmm0, %xmm1
+; KNL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; KNL-NEXT:    vpinsrd $1, %edi, %xmm1, %xmm1
+; KNL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: insert_v8i32:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpinsrd $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; SKX-NEXT:    vpinsrd $1, %edi, %xmm1, %xmm1
+; SKX-NEXT:    vinserti32x4 $1, %xmm1, %ymm0, %ymm0
+; SKX-NEXT:    retq
+  %val = load i32, i32* %ptr
+  %r1 = insertelement <8 x i32> %x, i32 %val, i32 1
+  %r2 = insertelement <8 x i32> %r1, i32 %y, i32 5
+  ret <8 x i32> %r2
+}
+
+define <4 x i32> @insert_v4i32(<4 x i32> %x, i32 %y, i32* %ptr) {
+; KNL-LABEL: insert_v4i32:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpinsrd $1, (%rsi), %xmm0, %xmm0
+; KNL-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: insert_v4i32:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpinsrd $1, (%rsi), %xmm0, %xmm0
+; SKX-NEXT:    vpinsrd $3, %edi, %xmm0, %xmm0
+; SKX-NEXT:    retq
+  %val = load i32, i32* %ptr
+  %r1 = insertelement <4 x i32> %x, i32 %val, i32 1
+  %r2 = insertelement <4 x i32> %r1, i32 %y, i32 3
+  ret <4 x i32> %r2
+}
+
+define <32 x i16> @insert_v32i16(<32 x i16> %x, i16 %y, i16* %ptr) {
+; KNL-LABEL: insert_v32i16:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpinsrw $1, (%rsi), %xmm0, %xmm2
+; KNL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm2
+; KNL-NEXT:    vpinsrw $1, %edi, %xmm2, %xmm2
+; KNL-NEXT:    vinserti128 $1, %xmm2, %ymm0, %ymm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: insert_v32i16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpinsrw $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; SKX-NEXT:    vextracti32x4 $1, %zmm0, %xmm1
+; SKX-NEXT:    vpinsrw $1, %edi, %xmm1, %xmm1
+; SKX-NEXT:    vinserti32x4 $1, %xmm1, %zmm0, %zmm0
+; SKX-NEXT:    retq
+  %val = load i16, i16* %ptr
+  %r1 = insertelement <32 x i16> %x, i16 %val, i32 1
+  %r2 = insertelement <32 x i16> %r1, i16 %y, i32 9
+  ret <32 x i16> %r2
+}
+
+define <16 x i16> @insert_v16i16(<16 x i16> %x, i16 %y, i16* %ptr) {
+; KNL-LABEL: insert_v16i16:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpinsrw $1, (%rsi), %xmm0, %xmm1
+; KNL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; KNL-NEXT:    vpinsrw $1, %edi, %xmm1, %xmm1
+; KNL-NEXT:    vinserti128 $1, %xmm1, %ymm0, %ymm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: insert_v16i16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpinsrw $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; SKX-NEXT:    vpinsrw $1, %edi, %xmm1, %xmm1
+; SKX-NEXT:    vinserti32x4 $1, %xmm1, %ymm0, %ymm0
+; SKX-NEXT:    retq
+  %val = load i16, i16* %ptr
+  %r1 = insertelement <16 x i16> %x, i16 %val, i32 1
+  %r2 = insertelement <16 x i16> %r1, i16 %y, i32 9
+  ret <16 x i16> %r2
+}
+
+define <8 x i16> @insert_v8i16(<8 x i16> %x, i16 %y, i16* %ptr) {
+; KNL-LABEL: insert_v8i16:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpinsrw $1, (%rsi), %xmm0, %xmm0
+; KNL-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: insert_v8i16:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpinsrw $1, (%rsi), %xmm0, %xmm0
+; SKX-NEXT:    vpinsrw $5, %edi, %xmm0, %xmm0
+; SKX-NEXT:    retq
+  %val = load i16, i16* %ptr
+  %r1 = insertelement <8 x i16> %x, i16 %val, i32 1
+  %r2 = insertelement <8 x i16> %r1, i16 %y, i32 5
+  ret <8 x i16> %r2
+}
+
+define <64 x i8> @insert_v64i8(<64 x i8> %x, i8 %y, i8* %ptr) {
+; KNL-LABEL: insert_v64i8:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpinsrb $1, (%rsi), %xmm0, %xmm2
+; KNL-NEXT:    vpblendd {{.*#+}} ymm0 = ymm2[0,1,2,3],ymm0[4,5,6,7]
+; KNL-NEXT:    vextracti128 $1, %ymm1, %xmm2
+; KNL-NEXT:    vpinsrb $2, %edi, %xmm2, %xmm2
+; KNL-NEXT:    vinserti128 $1, %xmm2, %ymm1, %ymm1
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: insert_v64i8:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpinsrb $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT:    vinserti32x4 $0, %xmm1, %zmm0, %zmm0
+; SKX-NEXT:    vextracti32x4 $3, %zmm0, %xmm1
+; SKX-NEXT:    vpinsrb $2, %edi, %xmm1, %xmm1
+; SKX-NEXT:    vinserti32x4 $3, %xmm1, %zmm0, %zmm0
+; SKX-NEXT:    retq
+  %val = load i8, i8* %ptr
+  %r1 = insertelement <64 x i8> %x, i8 %val, i32 1
+  %r2 = insertelement <64 x i8> %r1, i8 %y, i32 50
+  ret <64 x i8> %r2
+}
+
+define <32 x i8> @insert_v32i8(<32 x i8> %x, i8 %y, i8* %ptr) {
+; SKX-LABEL: insert_v32i8:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpinsrb $1, (%rsi), %xmm0, %xmm1
+; SKX-NEXT:    vpblendd {{.*#+}} ymm0 = ymm1[0,1,2,3],ymm0[4,5,6,7]
+; SKX-NEXT:    vextracti128 $1, %ymm0, %xmm1
+; SKX-NEXT:    vpinsrb $1, %edi, %xmm1, %xmm1
+; SKX-NEXT:    vinserti32x4 $1, %xmm1, %ymm0, %ymm0
+; SKX-NEXT:    retq
+  %val = load i8, i8* %ptr
+  %r1 = insertelement <32 x i8> %x, i8 %val, i32 1
+  %r2 = insertelement <32 x i8> %r1, i8 %y, i32 17
+  ret <32 x i8> %r2
+}
+
+define <16 x i8> @insert_v16i8(<16 x i8> %x, i8 %y, i8* %ptr) {
+; KNL-LABEL: insert_v16i8:
+; KNL:       ## BB#0:
+; KNL-NEXT:    vpinsrb $3, (%rsi), %xmm0, %xmm0
+; KNL-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
+; KNL-NEXT:    retq
+;
+; SKX-LABEL: insert_v16i8:
+; SKX:       ## BB#0:
+; SKX-NEXT:    vpinsrb $3, (%rsi), %xmm0, %xmm0
+; SKX-NEXT:    vpinsrb $10, %edi, %xmm0, %xmm0
+; SKX-NEXT:    retq
+  %val = load i8, i8* %ptr
+  %r1 = insertelement <16 x i8> %x, i8 %val, i32 3
+  %r2 = insertelement <16 x i8> %r1, i8 %y, i32 10
+  ret <16 x i8> %r2
+}
+
 define <8 x i64> @test_insert_128_v8i64(<8 x i64> %x, i64 %y) {
 ; KNL-LABEL: test_insert_128_v8i64:
 ; KNL:       ## BB#0:
diff --git a/llvm/test/MC/X86/x86-64-avx512bw.s b/llvm/test/MC/X86/x86-64-avx512bw.s
index feca21a..d968986 100644
--- a/llvm/test/MC/X86/x86-64-avx512bw.s
+++ b/llvm/test/MC/X86/x86-64-avx512bw.s
@@ -4112,6 +4112,194 @@
 // CHECK:  encoding: [0x62,0x61,0x2d,0x40,0x69,0xb2,0xc0,0xdf,0xff,0xff]
           vpunpckhwd -8256(%rdx), %zmm26, %zmm30
 
+// CHECK: vpextrb $171, %xmm17, %eax
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x14,0xc8,0xab]
+          vpextrb $171, %xmm17, %eax
+
+// CHECK: vpextrb $123, %xmm17, %eax
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x14,0xc8,0x7b]
+          vpextrb $123, %xmm17, %eax
+
+// CHECK: vpextrb $123, %xmm17, %r8d
+// CHECK:  encoding: [0x62,0xc3,0x7d,0x08,0x14,0xc8,0x7b]
+          vpextrb $123, %xmm17,%r8d
+
+// CHECK: vpextrb $123, %xmm17, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x14,0x09,0x7b]
+          vpextrb $123, %xmm17, (%rcx)
+
+// CHECK: vpextrb $123, %xmm17, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x08,0x14,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vpextrb $123, %xmm17, 291(%rax,%r14,8)
+
+// CHECK: vpextrb $123, %xmm17, 127(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x14,0x4a,0x7f,0x7b]
+          vpextrb $123, %xmm17, 127(%rdx)
+
+// CHECK: vpextrb $123, %xmm17, 128(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x14,0x8a,0x80,0x00,0x00,0x00,0x7b]
+          vpextrb $123, %xmm17, 128(%rdx)
+
+// CHECK: vpextrb $123, %xmm17, -128(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x14,0x4a,0x80,0x7b]
+          vpextrb $123, %xmm17, -128(%rdx)
+
+// CHECK: vpextrb $123, %xmm17, -129(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x14,0x8a,0x7f,0xff,0xff,0xff,0x7b]
+          vpextrb $123, %xmm17, -129(%rdx)
+
+// CHECK: vpinsrb $171, %eax, %xmm25, %xmm25
+// CHECK:  encoding: [0x62,0x63,0x35,0x00,0x20,0xc8,0xab]
+          vpinsrb $171,%eax, %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, %eax, %xmm25, %xmm25
+// CHECK:  encoding: [0x62,0x63,0x35,0x00,0x20,0xc8,0x7b]
+          vpinsrb $123,%eax, %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, %ebp, %xmm25, %xmm25
+// CHECK:  encoding: [0x62,0x63,0x35,0x00,0x20,0xcd,0x7b]
+          vpinsrb $123,%ebp, %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, %r13d, %xmm25, %xmm25
+// CHECK:  encoding: [0x62,0x43,0x35,0x00,0x20,0xcd,0x7b]
+          vpinsrb $123,%r13d, %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, (%rcx), %xmm25, %xmm25
+// CHECK:  encoding: [0x62,0x63,0x35,0x00,0x20,0x09,0x7b]
+          vpinsrb $123, (%rcx), %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, 291(%rax,%r14,8), %xmm25, %xmm25
+// CHECK:  encoding: [0x62,0x23,0x35,0x00,0x20,0x8c,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vpinsrb $123, 291(%rax,%r14,8), %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, 127(%rdx), %xmm25, %xmm25
+// CHECK:  encoding: [0x62,0x63,0x35,0x00,0x20,0x4a,0x7f,0x7b]
+          vpinsrb $123, 127(%rdx), %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, 128(%rdx), %xmm25, %xmm25
+// CHECK:  encoding: [0x62,0x63,0x35,0x00,0x20,0x8a,0x80,0x00,0x00,0x00,0x7b]
+          vpinsrb $123, 128(%rdx), %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, -128(%rdx), %xmm25, %xmm25
+// CHECK:  encoding: [0x62,0x63,0x35,0x00,0x20,0x4a,0x80,0x7b]
+          vpinsrb $123, -128(%rdx), %xmm25, %xmm25
+
+// CHECK: vpinsrb $123, -129(%rdx), %xmm25, %xmm25
+// CHECK:  encoding: [0x62,0x63,0x35,0x00,0x20,0x8a,0x7f,0xff,0xff,0xff,0x7b]
+          vpinsrb $123, -129(%rdx), %xmm25, %xmm25
+
+// CHECK: vpinsrw $171, %eax, %xmm25, %xmm18
+// CHECK:  encoding: [0x62,0xe1,0x35,0x00,0xc4,0xd0,0xab]
+          vpinsrw $171,%eax, %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, %eax, %xmm25, %xmm18
+// CHECK:  encoding: [0x62,0xe1,0x35,0x00,0xc4,0xd0,0x7b]
+          vpinsrw $123,%eax, %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, %ebp, %xmm25, %xmm18
+// CHECK:  encoding: [0x62,0xe1,0x35,0x00,0xc4,0xd5,0x7b]
+          vpinsrw $123,%ebp, %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, %r13d, %xmm25, %xmm18
+// CHECK:  encoding: [0x62,0xc1,0x35,0x00,0xc4,0xd5,0x7b]
+          vpinsrw $123,%r13d, %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, (%rcx), %xmm25, %xmm18
+// CHECK:  encoding: [0x62,0xe1,0x35,0x00,0xc4,0x11,0x7b]
+          vpinsrw $123, (%rcx), %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, 291(%rax,%r14,8), %xmm25, %xmm18
+// CHECK:  encoding: [0x62,0xa1,0x35,0x00,0xc4,0x94,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vpinsrw $123, 291(%rax,%r14,8), %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, 254(%rdx), %xmm25, %xmm18
+// CHECK:  encoding: [0x62,0xe1,0x35,0x00,0xc4,0x52,0x7f,0x7b]
+          vpinsrw $123, 254(%rdx), %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, 256(%rdx), %xmm25, %xmm18
+// CHECK:  encoding: [0x62,0xe1,0x35,0x00,0xc4,0x92,0x00,0x01,0x00,0x00,0x7b]
+          vpinsrw $123, 256(%rdx), %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, -256(%rdx), %xmm25, %xmm18
+// CHECK:  encoding: [0x62,0xe1,0x35,0x00,0xc4,0x52,0x80,0x7b]
+          vpinsrw $123, -256(%rdx), %xmm25, %xmm18
+
+// CHECK: vpinsrw $123, -258(%rdx), %xmm25, %xmm18
+// CHECK:  encoding: [0x62,0xe1,0x35,0x00,0xc4,0x92,0xfe,0xfe,0xff,0xff,0x7b]
+          vpinsrw $123, -258(%rdx), %xmm25, %xmm18
+
+// CHECK: vpextrw $123, %xmm28, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x15,0x21,0x7b]
+          vpextrw $123, %xmm28, (%rcx)
+
+// CHECK: vpextrw $123, %xmm28, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0x23,0x7d,0x08,0x15,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vpextrw $123, %xmm28, 291(%rax,%r14,8)
+
+// CHECK: vpextrw $123, %xmm28, 254(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x15,0x62,0x7f,0x7b]
+          vpextrw $123, %xmm28, 254(%rdx)
+
+// CHECK: vpextrw $123, %xmm28, 256(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x15,0xa2,0x00,0x01,0x00,0x00,0x7b]
+          vpextrw $123, %xmm28, 256(%rdx)
+
+// CHECK: vpextrw $123, %xmm28, -256(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x15,0x62,0x80,0x7b]
+          vpextrw $123, %xmm28, -256(%rdx)
+
+// CHECK: vpextrw $123, %xmm28, -258(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x15,0xa2,0xfe,0xfe,0xff,0xff,0x7b]
+          vpextrw $123, %xmm28, -258(%rdx)
+
+// CHECK: vpextrw $171, %xmm30, %eax
+// CHECK:  encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc6,0xab]
+          vpextrw $171, %xmm30,%rax
+
+// CHECK: vpextrw $123, %xmm30, %eax
+// CHECK:  encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc6,0x7b]
+          vpextrw $123, %xmm30,%rax
+
+// CHECK: vpextrw $123, %xmm30, %r8d
+// CHECK:  encoding: [0x62,0x11,0x7d,0x08,0xc5,0xc6,0x7b]
+          vpextrw $123, %xmm30,%r8
+
+// CHECK: vpextrw $171, %xmm28, %eax
+// CHECK:  encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc4,0xab]
+          vpextrw $0xab, %xmm28, %eax
+
+// CHECK: vpextrw $123, %xmm28, %eax
+// CHECK:  encoding: [0x62,0x91,0x7d,0x08,0xc5,0xc4,0x7b]
+          vpextrw $0x7b, %xmm28, %eax
+
+// CHECK: vpextrw $123, %xmm28, %r8d
+// CHECK:  encoding: [0x62,0x11,0x7d,0x08,0xc5,0xc4,0x7b]
+          vpextrw $0x7b, %xmm28, %r8d
+
+// CHECK: vpextrw $171, %xmm20, %eax
+// CHECK:  encoding: [0x62,0xb1,0x7d,0x08,0xc5,0xc4,0xab]
+          vpextrw $0xab, %xmm20, %eax
+
+// CHECK: vpextrw $123, %xmm20, %eax
+// CHECK:  encoding: [0x62,0xb1,0x7d,0x08,0xc5,0xc4,0x7b]
+          vpextrw $0x7b, %xmm20, %eax
+
+// CHECK: vpextrw $123, %xmm20, %r8d
+// CHECK:  encoding: [0x62,0x31,0x7d,0x08,0xc5,0xc4,0x7b]
+          vpextrw $0x7b, %xmm20, %r8d
+
+// CHECK: vpextrw $171, %xmm19, %eax
+// CHECK:  encoding: [0x62,0xb1,0x7d,0x08,0xc5,0xc3,0xab]
+          vpextrw $0xab, %xmm19, %eax
+
+// CHECK: vpextrw $123, %xmm19, %eax
+// CHECK:  encoding: [0x62,0xb1,0x7d,0x08,0xc5,0xc3,0x7b]
+          vpextrw $0x7b, %xmm19, %eax
+
+// CHECK: vpextrw $123, %xmm19, %r8d
+// CHECK:  encoding: [0x62,0x31,0x7d,0x08,0xc5,0xc3,0x7b]
+          vpextrw $0x7b, %xmm19, %r8d
+
 // CHECK: kunpckdq %k4, %k6, %k4
 // CHECK:  encoding: [0xc4,0xe1,0xcc,0x4b,0xe4]
           kunpckdq %k4, %k6, %k4
diff --git a/llvm/test/MC/X86/x86-64-avx512dq.s b/llvm/test/MC/X86/x86-64-avx512dq.s
index 82c6869..fef9003 100644
--- a/llvm/test/MC/X86/x86-64-avx512dq.s
+++ b/llvm/test/MC/X86/x86-64-avx512dq.s
@@ -2443,6 +2443,310 @@
 // CHECK:  encoding: [0x62,0xa1,0xff,0xca,0x7a,0xd5]
           vcvtuqq2ps %zmm21, %ymm18 {%k2} {z}
 
+// CHECK: vpextrd $171, %xmm28, %eax
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x16,0xe0,0xab]
+          vpextrd $0xab, %xmm28, %eax
+
+// CHECK: vpextrd $123, %xmm28, %eax
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x16,0xe0,0x7b]
+          vpextrd $0x7b, %xmm28, %eax
+
+// CHECK: vpextrd $123, %xmm28, %ebp
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x16,0xe5,0x7b]
+          vpextrd $0x7b, %xmm28, %ebp
+
+// CHECK: vpextrd $123, %xmm28, %r13d
+// CHECK:  encoding: [0x62,0x43,0x7d,0x08,0x16,0xe5,0x7b]
+          vpextrd $0x7b, %xmm28, %r13d
+
+// CHECK: vpextrd $123, %xmm28, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x16,0x21,0x7b]
+          vpextrd $0x7b, %xmm28, (%rcx)
+
+// CHECK: vpextrd $123, %xmm28, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0x23,0x7d,0x08,0x16,0xa4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vpextrd $0x7b, %xmm28, 291(%rax,%r14,8)
+
+// CHECK: vpextrd $123, %xmm28, 508(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x16,0x62,0x7f,0x7b]
+          vpextrd $0x7b, %xmm28, 508(%rdx)
+
+// CHECK: vpextrd $123, %xmm28, 512(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x16,0xa2,0x00,0x02,0x00,0x00,0x7b]
+          vpextrd $0x7b, %xmm28, 512(%rdx)
+
+// CHECK: vpextrd $123, %xmm28, -512(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x16,0x62,0x80,0x7b]
+          vpextrd $0x7b, %xmm28, -512(%rdx)
+
+// CHECK: vpextrd $123, %xmm28, -516(%rdx)
+// CHECK:  encoding: [0x62,0x63,0x7d,0x08,0x16,0xa2,0xfc,0xfd,0xff,0xff,0x7b]
+          vpextrd $0x7b, %xmm28, -516(%rdx)
+
+// CHECK: vpextrd $171, %xmm20, %eax
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x16,0xe0,0xab]
+          vpextrd $0xab, %xmm20, %eax
+
+// CHECK: vpextrd $123, %xmm20, %eax
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x16,0xe0,0x7b]
+          vpextrd $0x7b, %xmm20, %eax
+
+// CHECK: vpextrd $123, %xmm20, %ebp
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x16,0xe5,0x7b]
+          vpextrd $0x7b, %xmm20, %ebp
+
+// CHECK: vpextrd $123, %xmm20, %r13d
+// CHECK:  encoding: [0x62,0xc3,0x7d,0x08,0x16,0xe5,0x7b]
+          vpextrd $0x7b, %xmm20, %r13d
+
+// CHECK: vpextrd $123, %xmm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x16,0x21,0x7b]
+          vpextrd $0x7b, %xmm20, (%rcx)
+
+// CHECK: vpextrd $123, %xmm20, 4660(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0x7d,0x08,0x16,0xa4,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vpextrd $0x7b, %xmm20, 4660(%rax,%r14,8)
+
+// CHECK: vpextrd $123, %xmm20, 508(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x16,0x62,0x7f,0x7b]
+          vpextrd $0x7b, %xmm20, 508(%rdx)
+
+// CHECK: vpextrd $123, %xmm20, 512(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x16,0xa2,0x00,0x02,0x00,0x00,0x7b]
+          vpextrd $0x7b, %xmm20, 512(%rdx)
+
+// CHECK: vpextrd $123, %xmm20, -512(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x16,0x62,0x80,0x7b]
+          vpextrd $0x7b, %xmm20, -512(%rdx)
+
+// CHECK: vpextrd $123, %xmm20, -516(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0x7d,0x08,0x16,0xa2,0xfc,0xfd,0xff,0xff,0x7b]
+          vpextrd $0x7b, %xmm20, -516(%rdx)
+
+// CHECK: vpextrq $171, %xmm24, %rax
+// CHECK:  encoding: [0x62,0x63,0xfd,0x08,0x16,0xc0,0xab]
+          vpextrq $0xab, %xmm24, %rax
+
+// CHECK: vpextrq $123, %xmm24, %rax
+// CHECK:  encoding: [0x62,0x63,0xfd,0x08,0x16,0xc0,0x7b]
+          vpextrq $0x7b, %xmm24, %rax
+
+// CHECK: vpextrq $123, %xmm24, %r8
+// CHECK:  encoding: [0x62,0x43,0xfd,0x08,0x16,0xc0,0x7b]
+          vpextrq $0x7b, %xmm24, %r8
+
+// CHECK: vpextrq $123, %xmm24, (%rcx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x08,0x16,0x01,0x7b]
+          vpextrq $0x7b, %xmm24, (%rcx)
+
+// CHECK: vpextrq $123, %xmm24, 291(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0x23,0xfd,0x08,0x16,0x84,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vpextrq $0x7b, %xmm24, 291(%rax,%r14,8)
+
+// CHECK: vpextrq $123, %xmm24, 1016(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x08,0x16,0x42,0x7f,0x7b]
+          vpextrq $0x7b, %xmm24, 1016(%rdx)
+
+// CHECK: vpextrq $123, %xmm24, 1024(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x08,0x16,0x82,0x00,0x04,0x00,0x00,0x7b]
+          vpextrq $0x7b, %xmm24, 1024(%rdx)
+
+// CHECK: vpextrq $123, %xmm24, -1024(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x08,0x16,0x42,0x80,0x7b]
+          vpextrq $0x7b, %xmm24, -1024(%rdx)
+
+// CHECK: vpextrq $123, %xmm24, -1032(%rdx)
+// CHECK:  encoding: [0x62,0x63,0xfd,0x08,0x16,0x82,0xf8,0xfb,0xff,0xff,0x7b]
+          vpextrq $0x7b, %xmm24, -1032(%rdx)
+
+// CHECK: vpextrq $171, %xmm20, %rax
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x08,0x16,0xe0,0xab]
+          vpextrq $0xab, %xmm20, %rax
+
+// CHECK: vpextrq $123, %xmm20, %rax
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x08,0x16,0xe0,0x7b]
+          vpextrq $0x7b, %xmm20, %rax
+
+// CHECK: vpextrq $123, %xmm20, %r8
+// CHECK:  encoding: [0x62,0xc3,0xfd,0x08,0x16,0xe0,0x7b]
+          vpextrq $0x7b, %xmm20, %r8
+
+// CHECK: vpextrq $123, %xmm20, (%rcx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x08,0x16,0x21,0x7b]
+          vpextrq $0x7b, %xmm20, (%rcx)
+
+// CHECK: vpextrq $123, %xmm20, 4660(%rax,%r14,8)
+// CHECK:  encoding: [0x62,0xa3,0xfd,0x08,0x16,0xa4,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vpextrq $0x7b, %xmm20, 4660(%rax,%r14,8)
+
+// CHECK: vpextrq $123, %xmm20, 1016(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x08,0x16,0x62,0x7f,0x7b]
+          vpextrq $0x7b, %xmm20, 1016(%rdx)
+
+// CHECK: vpextrq $123, %xmm20, 1024(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x08,0x16,0xa2,0x00,0x04,0x00,0x00,0x7b]
+          vpextrq $0x7b, %xmm20, 1024(%rdx)
+
+// CHECK: vpextrq $123, %xmm20, -1024(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x08,0x16,0x62,0x80,0x7b]
+          vpextrq $0x7b, %xmm20, -1024(%rdx)
+
+// CHECK: vpextrq $123, %xmm20, -1032(%rdx)
+// CHECK:  encoding: [0x62,0xe3,0xfd,0x08,0x16,0xa2,0xf8,0xfb,0xff,0xff,0x7b]
+          vpextrq $0x7b, %xmm20, -1032(%rdx)
+
+// CHECK: vpinsrd $171, %eax, %xmm25, %xmm23
+// CHECK:  encoding: [0x62,0xe3,0x35,0x00,0x22,0xf8,0xab]
+          vpinsrd $0xab,%eax, %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, %eax, %xmm25, %xmm23
+// CHECK:  encoding: [0x62,0xe3,0x35,0x00,0x22,0xf8,0x7b]
+          vpinsrd $0x7b,%eax, %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, %ebp, %xmm25, %xmm23
+// CHECK:  encoding: [0x62,0xe3,0x35,0x00,0x22,0xfd,0x7b]
+          vpinsrd $0x7b,%ebp, %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, %r13d, %xmm25, %xmm23
+// CHECK:  encoding: [0x62,0xc3,0x35,0x00,0x22,0xfd,0x7b]
+          vpinsrd $0x7b,%r13d, %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, (%rcx), %xmm25, %xmm23
+// CHECK:  encoding: [0x62,0xe3,0x35,0x00,0x22,0x39,0x7b]
+          vpinsrd $0x7b,(%rcx), %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, 291(%rax,%r14,8), %xmm25, %xmm23
+// CHECK:  encoding: [0x62,0xa3,0x35,0x00,0x22,0xbc,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vpinsrd $0x7b,291(%rax,%r14,8), %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, 508(%rdx), %xmm25, %xmm23
+// CHECK:  encoding: [0x62,0xe3,0x35,0x00,0x22,0x7a,0x7f,0x7b]
+          vpinsrd $0x7b,508(%rdx), %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, 512(%rdx), %xmm25, %xmm23
+// CHECK:  encoding: [0x62,0xe3,0x35,0x00,0x22,0xba,0x00,0x02,0x00,0x00,0x7b]
+          vpinsrd $0x7b,512(%rdx), %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, -512(%rdx), %xmm25, %xmm23
+// CHECK:  encoding: [0x62,0xe3,0x35,0x00,0x22,0x7a,0x80,0x7b]
+          vpinsrd $0x7b,-512(%rdx), %xmm25, %xmm23
+
+// CHECK: vpinsrd $123, -516(%rdx), %xmm25, %xmm23
+// CHECK:  encoding: [0x62,0xe3,0x35,0x00,0x22,0xba,0xfc,0xfd,0xff,0xff,0x7b]
+          vpinsrd $0x7b,-516(%rdx), %xmm25, %xmm23
+
+// CHECK: vpinsrd $171, %eax, %xmm29, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0x15,0x00,0x22,0xf0,0xab]
+          vpinsrd $0xab,%eax, %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, %eax, %xmm29, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0x15,0x00,0x22,0xf0,0x7b]
+          vpinsrd $0x7b,%eax, %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, %ebp, %xmm29, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0x15,0x00,0x22,0xf5,0x7b]
+          vpinsrd $0x7b,%ebp, %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, %r13d, %xmm29, %xmm22
+// CHECK:  encoding: [0x62,0xc3,0x15,0x00,0x22,0xf5,0x7b]
+          vpinsrd $0x7b,%r13d, %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, (%rcx), %xmm29, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0x15,0x00,0x22,0x31,0x7b]
+          vpinsrd $0x7b,(%rcx), %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, 4660(%rax,%r14,8), %xmm29, %xmm22
+// CHECK:  encoding: [0x62,0xa3,0x15,0x00,0x22,0xb4,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vpinsrd $0x7b,4660(%rax,%r14,8), %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, 508(%rdx), %xmm29, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0x15,0x00,0x22,0x72,0x7f,0x7b]
+          vpinsrd $0x7b,508(%rdx), %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, 512(%rdx), %xmm29, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0x15,0x00,0x22,0xb2,0x00,0x02,0x00,0x00,0x7b]
+          vpinsrd $0x7b,512(%rdx), %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, -512(%rdx), %xmm29, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0x15,0x00,0x22,0x72,0x80,0x7b]
+          vpinsrd $0x7b,-512(%rdx), %xmm29, %xmm22
+
+// CHECK: vpinsrd $123, -516(%rdx), %xmm29, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0x15,0x00,0x22,0xb2,0xfc,0xfd,0xff,0xff,0x7b]
+          vpinsrd $0x7b,-516(%rdx), %xmm29, %xmm22
+
+// CHECK: vpinsrq $171, %rax, %xmm20, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0xdd,0x00,0x22,0xf0,0xab]
+          vpinsrq $0xab,%rax, %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, %rax, %xmm20, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0xdd,0x00,0x22,0xf0,0x7b]
+          vpinsrq $0x7b,%rax, %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, %r8, %xmm20, %xmm22
+// CHECK:  encoding: [0x62,0xc3,0xdd,0x00,0x22,0xf0,0x7b]
+          vpinsrq $0x7b,%r8, %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, (%rcx), %xmm20, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0xdd,0x00,0x22,0x31,0x7b]
+          vpinsrq $0x7b,(%rcx), %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, 291(%rax,%r14,8), %xmm20, %xmm22
+// CHECK:  encoding: [0x62,0xa3,0xdd,0x00,0x22,0xb4,0xf0,0x23,0x01,0x00,0x00,0x7b]
+          vpinsrq $0x7b,291(%rax,%r14,8), %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, 1016(%rdx), %xmm20, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0xdd,0x00,0x22,0x72,0x7f,0x7b]
+          vpinsrq $0x7b,1016(%rdx), %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, 1024(%rdx), %xmm20, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0xdd,0x00,0x22,0xb2,0x00,0x04,0x00,0x00,0x7b]
+          vpinsrq $0x7b,1024(%rdx), %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, -1024(%rdx), %xmm20, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0xdd,0x00,0x22,0x72,0x80,0x7b]
+          vpinsrq $0x7b,-1024(%rdx), %xmm20, %xmm22
+
+// CHECK: vpinsrq $123, -1032(%rdx), %xmm20, %xmm22
+// CHECK:  encoding: [0x62,0xe3,0xdd,0x00,0x22,0xb2,0xf8,0xfb,0xff,0xff,0x7b]
+          vpinsrq $0x7b,-1032(%rdx), %xmm20, %xmm22
+
+// CHECK: vpinsrq $171, %rax, %xmm19, %xmm25
+// CHECK:  encoding: [0x62,0x63,0xe5,0x00,0x22,0xc8,0xab]
+          vpinsrq $0xab,%rax, %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, %rax, %xmm19, %xmm25
+// CHECK:  encoding: [0x62,0x63,0xe5,0x00,0x22,0xc8,0x7b]
+          vpinsrq $0x7b,%rax, %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, %r8, %xmm19, %xmm25
+// CHECK:  encoding: [0x62,0x43,0xe5,0x00,0x22,0xc8,0x7b]
+          vpinsrq $0x7b,%r8, %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, (%rcx), %xmm19, %xmm25
+// CHECK:  encoding: [0x62,0x63,0xe5,0x00,0x22,0x09,0x7b]
+          vpinsrq $0x7b,(%rcx), %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, 4660(%rax,%r14,8), %xmm19, %xmm25
+// CHECK:  encoding: [0x62,0x23,0xe5,0x00,0x22,0x8c,0xf0,0x34,0x12,0x00,0x00,0x7b]
+          vpinsrq $0x7b,4660(%rax,%r14,8), %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, 1016(%rdx), %xmm19, %xmm25
+// CHECK:  encoding: [0x62,0x63,0xe5,0x00,0x22,0x4a,0x7f,0x7b]
+          vpinsrq $0x7b,1016(%rdx), %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, 1024(%rdx), %xmm19, %xmm25
+// CHECK:  encoding: [0x62,0x63,0xe5,0x00,0x22,0x8a,0x00,0x04,0x00,0x00,0x7b]
+          vpinsrq $0x7b,1024(%rdx), %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, -1024(%rdx), %xmm19, %xmm25
+// CHECK:  encoding: [0x62,0x63,0xe5,0x00,0x22,0x4a,0x80,0x7b]
+          vpinsrq $0x7b,-1024(%rdx), %xmm19, %xmm25
+
+// CHECK: vpinsrq $123, -1032(%rdx), %xmm19, %xmm25
+// CHECK:  encoding: [0x62,0x63,0xe5,0x00,0x22,0x8a,0xf8,0xfb,0xff,0xff,0x7b]
+          vpinsrq $0x7b,-1032(%rdx), %xmm19, %xmm25
+
 // CHECK: vinsertf32x8 $171, %ymm24, %zmm17, %zmm29
 // CHECK:  encoding: [0x62,0x03,0x75,0x40,0x1a,0xe8,0xab]
           vinsertf32x8 $0xab, %ymm24, %zmm17, %zmm29