Affine function on 64 bit Mac: use movd instead of movq for xmm to gpr moves.
BUG=69
TEST=NONE
Review URL: https://webrtc-codereview.appspot.com/728011

git-svn-id: http://libyuv.googlecode.com/svn/trunk@323 16f28f9a-4ce2-e073-06de-1de4eb20be90
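The failure reduces to a small inline asm case: Apple's gcc rejects "movq"
with an xmm source and a general purpose register destination, while "movd"
assembles and, given a 64 bit destination register, still encodes the 64 bit
move. A minimal sketch of the workaround (hypothetical helper, assuming
GNU-style inline asm on x86_64; not part of this change):

    #include <stdint.h>

    // Copy the low 64 bits of an xmm register to a 64 bit gpr.
    // "movq %1,%0" here triggers an error under Mac gcc; "movd" with a
    // 64 bit destination register assembles to the same 64 bit move.
    static inline uint64_t XmmLowToGpr(double v) {
      uint64_t out;
      asm("movd   %1,%0" : "=r"(out) : "x"(v));
      return out;
    }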
diff --git a/source/row_posix.cc b/source/row_posix.cc
index 8976fe1..ddfd278 100644
--- a/source/row_posix.cc
+++ b/source/row_posix.cc
@@ -3254,6 +3254,9 @@
 // TODO(fbarchard): Find 64 bit way to avoid masking.
 // TODO(fbarchard): Investigate why 4 pixels is slower than 2 on Core2.
 // Copy ARGB pixels from source image with slope to a row of destination.
+// Caveat - in 64 bit, movd is used with a 64 bit gpr because Mac gcc
+// produces an error if movq is used: movd %%xmm0,%1
+
 void ARGBAffineRow_SSE2(const uint8* src_argb, int src_argb_stride,
                         uint8* dst_argb, const float* uv_dudv, int width) {
   intptr_t src_argb_stride_temp = src_argb_stride;
@@ -3286,7 +3289,7 @@
     "packssdw  %%xmm1,%%xmm0                   \n"
     "pmaddwd   %%xmm5,%%xmm0                   \n"
 #if defined(__x86_64__)
-    "movq      %%xmm0,%1                       \n"
+    "movd      %%xmm0,%1                       \n"
     "mov       %1,%5                           \n"
     "and       $0x0fffffff,%1                  \n"
     "shr       $32,%5                          \n"
@@ -3303,7 +3306,7 @@
     "addps     %%xmm4,%%xmm2                   \n"
     "movq      %%xmm1,(%2)                     \n"
 #if defined(__x86_64__)
-    "movq      %%xmm0,%1                       \n"
+    "movd      %%xmm0,%1                       \n"
     "mov       %1,%5                           \n"
     "and       $0x0fffffff,%1                  \n"
     "shr       $32,%5                          \n"