Loosen up IV reuse to allow reuse of an IV with the same stride but a larger type, when truncating from the larger type to the smaller type is free.
For example, this turns this loop:
LBB1_1: # entry.bb_crit_edge
        xorl    %ecx, %ecx
        xorw    %dx, %dx
        movw    %dx, %si
LBB1_2: # bb
        movl    L_X$non_lazy_ptr, %edi
        movw    %si, (%edi)
        movl    L_Y$non_lazy_ptr, %edi
        movw    %dx, (%edi)
        addw    $4, %dx
        incw    %si
        incl    %ecx
        cmpl    %eax, %ecx
        jne     LBB1_2  # bb

into this:

LBB1_1: # entry.bb_crit_edge
        xorl    %ecx, %ecx
        xorw    %dx, %dx
LBB1_2: # bb
        movl    L_X$non_lazy_ptr, %esi
        movw    %cx, (%esi)
        movl    L_Y$non_lazy_ptr, %esi
        movw    %dx, (%esi)
        addw    $4, %dx
        incl    %ecx
        cmpl    %eax, %ecx
        jne     LBB1_2  # bb
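
The underlying idea: when LSR considers a new induction variable of a narrower type (here i16) whose stride matches an existing wider IV (here i32), it can reuse the wider IV and truncate instead, provided the target reports that the truncation costs nothing. Below is a minimal self-contained C++ sketch of that gating check; the TargetInfo struct and canReuseWiderIV helper are hypothetical stand-ins for the real TargetLowering/LSR plumbing, not the actual implementation.

    #include <cstdint>
    #include <iostream>

    // Hypothetical stand-in for the target's cost model. On an x86-like
    // target, truncating a 32-bit value to 16 bits is free: narrow ops
    // simply read the low subregister (%cx is the low half of %ecx).
    struct TargetInfo {
      bool isTruncateFree(unsigned FromBits, unsigned ToBits) const {
        // Assumption: any integer truncation to a narrower type is a no-op.
        return FromBits > ToBits;
      }
    };

    // Hypothetical reuse check: a narrower IV candidate folds into an
    // existing wider IV only if the strides agree in the narrow type and
    // the wide-to-narrow truncation is free.
    bool canReuseWiderIV(const TargetInfo &TI,
                         unsigned WideBits, unsigned NarrowBits,
                         int64_t WideStride, int64_t NarrowStride) {
      if (WideBits <= NarrowBits)
        return false;
      // Compare strides modulo the narrow type's width.
      uint64_t Mask = (NarrowBits == 64) ? ~0ULL
                                         : ((1ULL << NarrowBits) - 1);
      if ((uint64_t(WideStride) & Mask) != (uint64_t(NarrowStride) & Mask))
        return false;
      return TI.isTruncateFree(WideBits, NarrowBits);
    }

    int main() {
      TargetInfo TI;
      // The i16 counter stepping by 1 can reuse the i32 counter stepping
      // by 1, so the separate incw %si disappears.
      std::cout << canReuseWiderIV(TI, 32, 16, 1, 1) << "\n"; // 1 (reuse)
      // A mismatched stride cannot be reused.
      std::cout << canReuseWiderIV(TI, 32, 16, 4, 1) << "\n"; // 0
    }

In the rewritten loop only incl %ecx remains as an increment, which is what the new test below verifies with grep inc | count 1.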


git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@43375 91177308-0d34-0410-b5e6-96231b3b80d8
diff --git a/test/CodeGen/X86/loop-strength-reduce5.ll b/test/CodeGen/X86/loop-strength-reduce5.ll
new file mode 100644
index 0000000..6e037e2
--- /dev/null
+++ b/test/CodeGen/X86/loop-strength-reduce5.ll
@@ -0,0 +1,23 @@
+; RUN: llvm-as < %s | llc -march=x86 | grep inc | count 1
+
+@X = weak global i16 0		; <i16*> [#uses=1]
+@Y = weak global i16 0		; <i16*> [#uses=1]
+
+define void @foo(i32 %N) {
+entry:
+	%tmp1019 = icmp sgt i32 %N, 0		; <i1> [#uses=1]
+	br i1 %tmp1019, label %bb, label %return
+
+bb:		; preds = %bb, %entry
+	%i.014.0 = phi i32 [ 0, %entry ], [ %indvar.next, %bb ]		; <i32> [#uses=2]
+	%tmp1 = trunc i32 %i.014.0 to i16		; <i16> [#uses=2]
+	volatile store i16 %tmp1, i16* @X, align 2
+	%tmp34 = shl i16 %tmp1, 2		; <i16> [#uses=1]
+	volatile store i16 %tmp34, i16* @Y, align 2
+	%indvar.next = add i32 %i.014.0, 1		; <i32> [#uses=2]
+	%exitcond = icmp eq i32 %indvar.next, %N		; <i1> [#uses=1]
+	br i1 %exitcond, label %return, label %bb
+
+return:		; preds = %bb, %entry
+	ret void
+}