Rename invariant.group.barrier to launder.invariant.group

Summary:
This is one of the initial commits of the "RFC: Devirtualization v2" proposal:
https://docs.google.com/document/d/16GVtCpzK8sIHNc2qZz6RN8amICNBtvjWUod2SujZVEo/edit?usp=sharing
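
The intrinsic is renamed and old uses are auto-upgraded (covered by the new
upgrade-invariant-group-barrier.ll test below). As a minimal sketch of the IR
before and after this change (%p and %q are placeholder names):

  ; old form, rewritten by the auto-upgrade when the IR is loaded:
  %q = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
  ; new form:
  %q = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)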

Reviewers: rsmith, amharc, kuhar, sanjoy

Subscribers: arsenm, nhaehnle, javed.absar, hiraditya, llvm-commits

Differential Revision: https://reviews.llvm.org/D45111

llvm-svn: 331448
diff --git a/llvm/test/Analysis/MemorySSA/invariant-groups.ll b/llvm/test/Analysis/MemorySSA/invariant-groups.ll
index 062b574..a2535d5 100644
--- a/llvm/test/Analysis/MemorySSA/invariant-groups.ll
+++ b/llvm/test/Analysis/MemorySSA/invariant-groups.ll
@@ -1,7 +1,7 @@
 ; RUN: opt -basicaa -print-memoryssa -verify-memoryssa -analyze < %s 2>&1 | FileCheck %s
 ;
 ; Currently, MemorySSA doesn't support invariant groups. So, we should ignore
-; invariant.group.barrier intrinsics entirely. We'll need to pay attention to
+; launder.invariant.group intrinsics entirely. We'll need to pay attention to
 ; them when/if we decide to support invariant groups.
 
 @g = external global i32
@@ -17,8 +17,8 @@
 
   %1 = bitcast i32* %a to i8*
 ; CHECK:  3 = MemoryDef(2)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
-  %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+  %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
   %a32 = bitcast i8* %a8 to i32*
 
 ; This have to be MemoryUse(2), because we can't skip the barrier based on
@@ -36,8 +36,8 @@
 
   %1 = bitcast i32* %a to i8*
 ; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
-  %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)  
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+  %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)  
   %a32 = bitcast i8* %a8 to i32*
 
 ; We can skip the barrier only if the "skip" is not based on !invariant.group.
@@ -55,8 +55,8 @@
 
   %1 = bitcast i32* %a to i8*
 ; CHECK: 1 = MemoryDef(liveOnEntry)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
-  %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+  %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
   %a32 = bitcast i8* %a8 to i32*
 
 ; We can skip the barrier only if the "skip" is not based on !invariant.group.
@@ -86,8 +86,8 @@
   store i32 1, i32* @g, align 4
   %1 = bitcast i32* %a to i8*
 ; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
-  %a8 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %1)
+; CHECK-NEXT: %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
+  %a8 = call i8* @llvm.launder.invariant.group.p0i8(i8* %1)
   %a32 = bitcast i8* %a8 to i32*
 
 ; CHECK: MemoryUse(2)
@@ -145,8 +145,8 @@
   call void @clobber8(i8* %p)
 
 ; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
-  %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
+; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+  %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
   br i1 undef, label %Loop.Body, label %Loop.End
 
 Loop.Body:
@@ -192,8 +192,8 @@
   call void @clobber8(i8* %p)
 
 ; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
-  %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
+; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+  %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
   br i1 undef, label %Loop.Body, label %Loop.End
 
 Loop.Body:
@@ -253,8 +253,8 @@
 ; CHECK-NEXT: call void @clobber
   call void @clobber8(i8* %p)
 ; CHECK: 3 = MemoryDef(2)
-; CHECK-NEXT: %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
-  %after = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p)
+; CHECK-NEXT: %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
+  %after = call i8* @llvm.launder.invariant.group.p0i8(i8* %p)
   br i1 undef, label %Loop.Pre, label %Loop.End
 
 Loop.Pre:
@@ -302,12 +302,12 @@
 ; CHECK-NEXT: store i8 42, i8* %ptr, !invariant.group !0
   store i8 42, i8* %ptr, !invariant.group !0
 ; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call i8* @llvm.invariant.group.barrier
-  %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK-NEXT: call i8* @llvm.launder.invariant.group
+  %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
 ; FIXME: This one could be CSEd.
 ; CHECK: 3 = MemoryDef(2)
-; CHECK: call i8* @llvm.invariant.group.barrier
-  %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group
+  %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
 ; CHECK: 4 = MemoryDef(3)
 ; CHECK-NEXT: call void @clobber8(i8* %ptr)
   call void @clobber8(i8* %ptr)
@@ -331,13 +331,13 @@
 ; CHECK-NEXT: store i8 42, i8* %ptr, !invariant.group !0
   store i8 42, i8* %ptr, !invariant.group !0
 ; CHECK: 2 = MemoryDef(1)
-; CHECK-NEXT: call i8* @llvm.invariant.group.barrier
-  %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK-NEXT: call i8* @llvm.launder.invariant.group
+  %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
 ; CHECK: 3 = MemoryDef(2)
   store i8 43, i8* %ptr
 ; CHECK: 4 = MemoryDef(3)
-; CHECK-NEXT: call i8* @llvm.invariant.group.barrier
-  %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+; CHECK-NEXT: call i8* @llvm.launder.invariant.group
+  %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
 ; CHECK: 5 = MemoryDef(4)
 ; CHECK-NEXT: call void @clobber8(i8* %ptr)
   call void @clobber8(i8* %ptr)
@@ -354,7 +354,7 @@
 }
 
 
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
 declare void @clobber(i32*)
 declare void @clobber8(i8*)
 declare void @use(i8* readonly)
diff --git a/llvm/test/Bitcode/upgrade-invariant-group-barrier.ll b/llvm/test/Bitcode/upgrade-invariant-group-barrier.ll
new file mode 100644
index 0000000..d187489
--- /dev/null
+++ b/llvm/test/Bitcode/upgrade-invariant-group-barrier.ll
@@ -0,0 +1,22 @@
+; RUN: opt -S < %s | FileCheck %s
+
+; The intrinsic initially took only i8*, then it was made polymorphic, and
+; then it was renamed to launder.invariant.group
+define void @test(i8* %p1, i16* %p16) {
+; CHECK-LABEL: @test
+; CHECK: %p2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p1)
+; CHECK: %p3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %p1)
+; CHECK: %p4 = call i16* @llvm.launder.invariant.group.p0i16(i16* %p16)
+  %p2 = call i8* @llvm.invariant.group.barrier(i8* %p1)
+  %p3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %p1)
+  %p4 = call i16* @llvm.invariant.group.barrier.p0i16(i16* %p16)
+  ret void
+}
+
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable
+; CHECK: declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable
+; CHECK: declare i16* @llvm.launder.invariant.group.p0i16(i16*)
+declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i16* @llvm.invariant.group.barrier.p0i16(i16*)
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll b/llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
index 179dd51..34b2a56 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/unknown-intrinsic.ll
@@ -1,10 +1,10 @@
 ; RUN: llc -O0 -mtriple=arm64 < %s
 
-declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.launder.invariant.group(i8*)
 
 define i8* @barrier(i8* %p) {
-; CHECK: bl llvm.invariant.group.barrier
-        %q = call i8* @llvm.invariant.group.barrier(i8* %p)
+; CHECK: bl llvm.launder.invariant.group
+        %q = call i8* @llvm.launder.invariant.group(i8* %p)
         ret i8* %q
 }
 
diff --git a/llvm/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll b/llvm/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
index 768d520..3a8b45f 100644
--- a/llvm/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
+++ b/llvm/test/CodeGen/AMDGPU/promote-alloca-invariant-markers.ll
@@ -3,7 +3,7 @@
 
 declare {}* @llvm.invariant.start.p5i8(i64, i8 addrspace(5)* nocapture) #0
 declare void @llvm.invariant.end.p5i8({}*, i64, i8 addrspace(5)* nocapture) #0
-declare i8 addrspace(5)* @llvm.invariant.group.barrier.p5i8(i8 addrspace(5)*) #1
+declare i8 addrspace(5)* @llvm.launder.invariant.group.p5i8(i8 addrspace(5)*) #1
 
 ; GCN-LABEL: {{^}}use_invariant_promotable_lds:
 ; GCN: buffer_load_dword
@@ -17,7 +17,7 @@
   store i32 %tmp3, i32 addrspace(5)* %tmp
   %tmp4 = call {}* @llvm.invariant.start.p5i8(i64 4, i8 addrspace(5)* %tmp1) #0
   call void @llvm.invariant.end.p5i8({}* %tmp4, i64 4, i8 addrspace(5)* %tmp1) #0
-  %tmp5 = call i8 addrspace(5)* @llvm.invariant.group.barrier.p5i8(i8 addrspace(5)* %tmp1) #1
+  %tmp5 = call i8 addrspace(5)* @llvm.launder.invariant.group.p5i8(i8 addrspace(5)* %tmp1) #1
   ret void
 }
 
diff --git a/llvm/test/CodeGen/Generic/intrinsics.ll b/llvm/test/CodeGen/Generic/intrinsics.ll
index 6a51d2d..3964968 100644
--- a/llvm/test/CodeGen/Generic/intrinsics.ll
+++ b/llvm/test/CodeGen/Generic/intrinsics.ll
@@ -39,10 +39,10 @@
         ret double %I
 }
 
-declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.launder.invariant.group(i8*)
 
 define i8* @barrier(i8* %p) {
-        %q = call i8* @llvm.invariant.group.barrier(i8* %p)
+        %q = call i8* @llvm.launder.invariant.group(i8* %p)
         ret i8* %q
 }
 
diff --git a/llvm/test/Other/Inputs/invariant.group.barrier.ll b/llvm/test/Other/Inputs/invariant.group.barrier.ll
deleted file mode 100644
index 565b098..0000000
--- a/llvm/test/Other/Inputs/invariant.group.barrier.ll
+++ /dev/null
@@ -1,15 +0,0 @@
-; RUN: opt -S -gvn < %s | FileCheck %s
-; RUN: opt -S -newgvn < %s | FileCheck %s
-; RUN: opt -S -O3 < %s | FileCheck %s
-
-; This test check if optimizer is not proving equality based on mustalias
-; CHECK-LABEL: define void @dontProveEquality(i8* %a) 
-define void @dontProveEquality(i8* %a) {
-  %b = call i8* @llvm.invariant.group.barrier(i8* %a)
-  %r = i1 icmp eq i8* %b, i8* %a
-;CHECK: call void @use(%r)
-  call void @use(%r)
-}
-
-declare void @use(i1)
-declare i8* @llvm.invariant.group.barrier(i8 *)
diff --git a/llvm/test/Other/invariant.group.barrier.ll b/llvm/test/Other/invariant.group.barrier.ll
deleted file mode 100644
index 5ba4fcc..0000000
--- a/llvm/test/Other/invariant.group.barrier.ll
+++ /dev/null
@@ -1,83 +0,0 @@
-; RUN: opt -S -early-cse < %s | FileCheck %s
-; RUN: opt -S -gvn < %s | FileCheck %s
-; RUN: opt -S -newgvn < %s | FileCheck %s
-; RUN: opt -S -O3 < %s | FileCheck %s
-
-; These tests checks if passes with CSE functionality can do CSE on
-; invariant.group.barrier, that is prohibited if there is a memory clobber
-; between barriers call.
-
-; CHECK-LABEL: define i8 @optimizable()
-define i8 @optimizable() {
-entry:
-    %ptr = alloca i8
-    store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
-    %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-; FIXME: This one could be CSE
-; CHECK: call i8* @llvm.invariant.group.barrier
-    %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-; CHECK: call void @clobber(i8* {{.*}}%ptr)
-    call void @clobber(i8* %ptr)
-
-; CHECK: call void @use(i8* {{.*}}%ptr2)
-    call void @use(i8* %ptr2)
-; CHECK: call void @use(i8* {{.*}}%ptr3)
-    call void @use(i8* %ptr3)
-; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
-    %v = load i8, i8* %ptr3, !invariant.group !0
-
-    ret i8 %v
-}
-
-; CHECK-LABEL: define i8 @unoptimizable()
-define i8 @unoptimizable() {
-entry:
-    %ptr = alloca i8
-    store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
-    %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-    call void @clobber(i8* %ptr)
-; CHECK: call i8* @llvm.invariant.group.barrier.p0i8
-    %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-; CHECK: call void @clobber(i8* {{.*}}%ptr)
-    call void @clobber(i8* %ptr)
-; CHECK: call void @use(i8* {{.*}}%ptr2)
-    call void @use(i8* %ptr2)
-; CHECK: call void @use(i8* {{.*}}%ptr3)
-    call void @use(i8* %ptr3)
-; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
-    %v = load i8, i8* %ptr3, !invariant.group !0
-
-    ret i8 %v
-}
-
-; CHECK-LABEL: define i8 @unoptimizable2()
-define i8 @unoptimizable2() {
-    %ptr = alloca i8
-    store i8 42, i8* %ptr, !invariant.group !0
-; CHECK: call i8* @llvm.invariant.group.barrier
-    %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-    store i8 43, i8* %ptr
-; CHECK: call i8* @llvm.invariant.group.barrier
-    %ptr3 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-; CHECK: call void @clobber(i8* {{.*}}%ptr)
-    call void @clobber(i8* %ptr)
-; CHECK: call void @use(i8* {{.*}}%ptr2)
-    call void @use(i8* %ptr2)
-; CHECK: call void @use(i8* {{.*}}%ptr3)
-    call void @use(i8* %ptr3)
-; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
-    %v = load i8, i8* %ptr3, !invariant.group !0
-    ret i8 %v
-}
-
-declare void @use(i8* readonly)
-
-declare void @clobber(i8*)
-; CHECK: Function Attrs: inaccessiblememonly nounwind{{$}}
-; CHECK-NEXT: declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
-
-!0 = !{}
-
diff --git a/llvm/test/Other/launder.invariant.group.ll b/llvm/test/Other/launder.invariant.group.ll
new file mode 100644
index 0000000..ebb2819
--- /dev/null
+++ b/llvm/test/Other/launder.invariant.group.ll
@@ -0,0 +1,94 @@
+; RUN: opt -S -early-cse < %s | FileCheck %s
+; RUN: opt -S -gvn < %s | FileCheck %s
+; RUN: opt -S -newgvn < %s | FileCheck %s
+; RUN: opt -S -O3 < %s | FileCheck %s
+
+; These tests check whether passes with CSE functionality can CSE
+; launder.invariant.group, which is prohibited if there is a memory clobber
+; between the barrier calls.
+
+; CHECK-LABEL: define i8 @optimizable()
+define i8 @optimizable() {
+entry:
+    %ptr = alloca i8
+    store i8 42, i8* %ptr, !invariant.group !0
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+; FIXME: This one could be CSEd.
+; CHECK: call i8* @llvm.launder.invariant.group
+    %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+; CHECK: call void @clobber(i8* {{.*}}%ptr)
+    call void @clobber(i8* %ptr)
+
+; CHECK: call void @use(i8* {{.*}}%ptr2)
+    call void @use(i8* %ptr2)
+; CHECK: call void @use(i8* {{.*}}%ptr3)
+    call void @use(i8* %ptr3)
+; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
+    %v = load i8, i8* %ptr3, !invariant.group !0
+
+    ret i8 %v
+}
+
+; CHECK-LABEL: define i8 @unoptimizable()
+define i8 @unoptimizable() {
+entry:
+    %ptr = alloca i8
+    store i8 42, i8* %ptr, !invariant.group !0
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+    call void @clobber(i8* %ptr)
+; CHECK: call i8* @llvm.launder.invariant.group.p0i8
+    %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+; CHECK: call void @clobber(i8* {{.*}}%ptr)
+    call void @clobber(i8* %ptr)
+; CHECK: call void @use(i8* {{.*}}%ptr2)
+    call void @use(i8* %ptr2)
+; CHECK: call void @use(i8* {{.*}}%ptr3)
+    call void @use(i8* %ptr3)
+; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
+    %v = load i8, i8* %ptr3, !invariant.group !0
+
+    ret i8 %v
+}
+
+; CHECK-LABEL: define i8 @unoptimizable2()
+define i8 @unoptimizable2() {
+    %ptr = alloca i8
+    store i8 42, i8* %ptr, !invariant.group !0
+; CHECK: call i8* @llvm.launder.invariant.group
+    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+    store i8 43, i8* %ptr
+; CHECK: call i8* @llvm.launder.invariant.group
+    %ptr3 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
+; CHECK: call void @clobber(i8* {{.*}}%ptr)
+    call void @clobber(i8* %ptr)
+; CHECK: call void @use(i8* {{.*}}%ptr2)
+    call void @use(i8* %ptr2)
+; CHECK: call void @use(i8* {{.*}}%ptr3)
+    call void @use(i8* %ptr3)
+; CHECK: load i8, i8* %ptr3, {{.*}}!invariant.group
+    %v = load i8, i8* %ptr3, !invariant.group !0
+    ret i8 %v
+}
+
+; This test checks that the optimizer does not prove equality based on mustalias
+; CHECK-LABEL: define void @dontProveEquality(i8* %a)
+define void @dontProveEquality(i8* %a) {
+  %b = call i8* @llvm.launder.invariant.group.p0i8(i8* %a)
+  %r = icmp eq i8* %b, %a
+; CHECK: call void @useBool(i1 %r)
+  call void @useBool(i1 %r)
+  ret void
+}
+
+declare void @use(i8* readonly)
+declare void @useBool(i1)
+
+declare void @clobber(i8*)
+; CHECK: Function Attrs: inaccessiblememonly nounwind speculatable{{$}}
+; CHECK-NEXT: declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
+
+!0 = !{}
+
diff --git a/llvm/test/Transforms/CodeGenPrepare/invariant.group.ll b/llvm/test/Transforms/CodeGenPrepare/invariant.group.ll
index 042e58b..3ad1ca7 100644
--- a/llvm/test/Transforms/CodeGenPrepare/invariant.group.ll
+++ b/llvm/test/Transforms/CodeGenPrepare/invariant.group.ll
@@ -6,10 +6,10 @@
 define void @foo() {
 enter:
   ; CHECK-NOT: !invariant.group
-  ; CHECK-NOT: @llvm.invariant.group.barrier.p0i8(
+  ; CHECK-NOT: @llvm.launder.invariant.group.p0i8(
   ; CHECK: %val = load i8, i8* @tmp, !tbaa
   %val = load i8, i8* @tmp, !invariant.group !0, !tbaa !{!1, !1, i64 0}
-  %ptr = call i8* @llvm.invariant.group.barrier.p0i8(i8* @tmp)
+  %ptr = call i8* @llvm.launder.invariant.group.p0i8(i8* @tmp)
   
   ; CHECK: store i8 42, i8* @tmp
   store i8 42, i8* %ptr, !invariant.group !0
@@ -18,7 +18,7 @@
 }
 ; CHECK-LABEL: }
 
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
 
 !0 = !{!"something"}
 !1 = !{!"x", !0}
diff --git a/llvm/test/Transforms/GVN/invariant.group.ll b/llvm/test/Transforms/GVN/invariant.group.ll
index 1bc1f49..8135087 100644
--- a/llvm/test/Transforms/GVN/invariant.group.ll
+++ b/llvm/test/Transforms/GVN/invariant.group.ll
@@ -25,7 +25,7 @@
 entry:
     %ptr = alloca i8
     store i8 42, i8* %ptr, !invariant.group !0
-    %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
     %a = load i8, i8* %ptr, !invariant.group !0
     
     call void @foo(i8* %ptr2); call to use %ptr2
@@ -242,7 +242,7 @@
 entry:
     %ptr = alloca i8
     store i8 42, i8* %ptr, !invariant.group !0
-    %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
 ; CHECK-NOT: load
     %a = load i8, i8* %ptr2, !invariant.group !0
     
@@ -314,7 +314,7 @@
 ; CHECK: store i8 %unknownValue, i8* %ptr, !invariant.group !0
     store i8 %unknownValue, i8* %ptr, !invariant.group !0 
 
-    %newPtr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+    %newPtr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
 ; CHECK-NOT: load
     %d = load i8, i8* %newPtr2, !invariant.group !0
 ; CHECK: ret i8 %unknownValue
@@ -441,7 +441,7 @@
 declare void @_ZN1AC1Ev(%struct.A*)
 declare void @fooBit(i1*, i1)
 
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
 
 ; Function Attrs: nounwind
 declare void @llvm.assume(i1 %cmp.vtables) #0
diff --git a/llvm/test/Transforms/GlobalOpt/invariant.group.barrier.ll b/llvm/test/Transforms/GlobalOpt/invariant.group.barrier.ll
index 80cd411..744ab91 100644
--- a/llvm/test/Transforms/GlobalOpt/invariant.group.barrier.ll
+++ b/llvm/test/Transforms/GlobalOpt/invariant.group.barrier.ll
@@ -33,7 +33,7 @@
   store i32 %val, i32* %valptr
   
   %0 = bitcast i32* %valptr to i8*
-  %barr = call i8* @llvm.invariant.group.barrier(i8* %0)
+  %barr = call i8* @llvm.launder.invariant.group(i8* %0)
   %1 = bitcast i8* %barr to i32*
   
   %val2 = load i32, i32* %1
@@ -41,7 +41,7 @@
   ret void
 }
 
-; We can't step through invariant.group.barrier here, because that would change
+; We can't step through launder.invariant.group here, because that would change
 ; this load in @usage_of_globals()
 ; val = load i32, i32* %ptrVal, !invariant.group !0 
 ; into 
@@ -54,7 +54,7 @@
   store i32 13, i32* @tmp3, !invariant.group !0
   
   %0 = bitcast i32* @tmp3 to i8*
-  %barr = call i8* @llvm.invariant.group.barrier(i8* %0)
+  %barr = call i8* @llvm.launder.invariant.group(i8* %0)
   %1 = bitcast i8* %barr to i32*
   
   store i32* %1, i32** @ptrToTmp3
@@ -74,6 +74,6 @@
 
 declare void @changeTmp3ValAndCallBarrierInside()
 
-declare i8* @llvm.invariant.group.barrier(i8*)
+declare i8* @llvm.launder.invariant.group(i8*)
 
 !0 = !{!"something"}
diff --git a/llvm/test/Transforms/NewGVN/invariant.group.ll b/llvm/test/Transforms/NewGVN/invariant.group.ll
index 9839fc4..d5890f1 100644
--- a/llvm/test/Transforms/NewGVN/invariant.group.ll
+++ b/llvm/test/Transforms/NewGVN/invariant.group.ll
@@ -26,7 +26,7 @@
 entry:
     %ptr = alloca i8
     store i8 42, i8* %ptr, !invariant.group !0
-    %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
     %a = load i8, i8* %ptr, !invariant.group !0
     
     call void @foo(i8* %ptr2); call to use %ptr2
@@ -243,8 +243,7 @@
 entry:
     %ptr = alloca i8
     store i8 42, i8* %ptr, !invariant.group !0
-    %ptr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
-; CHECK-NOT: load
+    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
     %a = load i8, i8* %ptr2, !invariant.group !0
     
 ; CHECK: ret i8 42
@@ -315,7 +314,7 @@
 ; CHECK: store i8 %unknownValue, i8* %ptr, !invariant.group !0
     store i8 %unknownValue, i8* %ptr, !invariant.group !0 
 
-    %newPtr2 = call i8* @llvm.invariant.group.barrier.p0i8(i8* %ptr)
+    %newPtr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
 ; CHECK-NOT: load
     %d = load i8, i8* %newPtr2, !invariant.group !0
 ; CHECK: ret i8 %unknownValue
@@ -442,7 +441,7 @@
 declare void @_ZN1AC1Ev(%struct.A*)
 declare void @fooBit(i1*, i1)
 
-declare i8* @llvm.invariant.group.barrier.p0i8(i8*)
+declare i8* @llvm.launder.invariant.group.p0i8(i8*)
 
 ; Function Attrs: nounwind
 declare void @llvm.assume(i1 %cmp.vtables) #0