On amd64, allow the register allocator to use %r10, which it previously
did not.  This gives a 0%-3% speedup, usually closer to the 0% end.

git-svn-id: svn://svn.valgrind.org/vex/trunk@1680 8f6e269a-dfd6-0310-a8e1-e2731360e62c
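For readers skimming the hunks below: they grow the table that
priv/host-amd64/hdefs.c hands to the register allocator by one slot,
adding %r10.  Here is a minimal sketch of the post-patch shape of that
code; it is not the actual VEX source.  The function name is
hypothetical (suffixed _sketch), it assumes the VEX-internal
declarations of Int, HReg, LibVEX_Alloc and the hregAMD64_* register
constructors are in scope, and entries [2]..[15], which the diff does
not show, are elided.

    /* Sketch only, not the actual VEX source.  Assumes the
       VEX-internal declarations of Int, HReg, LibVEX_Alloc and the
       hregAMD64_* register constructors are in scope. */
    static void getAllocableRegs_AMD64_sketch ( Int* nregs, HReg** arr )
    {
       /* One more allocable register than before: 20, not 19. */
       *nregs = 20;
       *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
       (*arr)[ 0] = hregAMD64_RSI();
       (*arr)[ 1] = hregAMD64_RDI();
       /* ... entries [2]..[15]: further integer and XMM registers,
          not shown in the diff hunks ... */
       (*arr)[16] = hregAMD64_XMM10();
       (*arr)[17] = hregAMD64_XMM11();
       (*arr)[18] = hregAMD64_XMM12();
       (*arr)[19] = hregAMD64_R10();   /* the newly allocable register */
    }

The allocator simply iterates over whatever this table contains, so
making %r10 available is just a matter of appending it and bumping the
count, as the two hunks below do.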
diff --git a/priv/host-amd64/hdefs.c b/priv/host-amd64/hdefs.c
index a703180..602b197 100644
--- a/priv/host-amd64/hdefs.c
+++ b/priv/host-amd64/hdefs.c
@@ -168,7 +168,7 @@
    (*arr)[ 5] = hregAMD64_XMM9();
 #endif
 #if 1
-   *nregs = 19;
+   *nregs = 20;
    *arr = LibVEX_Alloc(*nregs * sizeof(HReg));
    (*arr)[ 0] = hregAMD64_RSI();
    (*arr)[ 1] = hregAMD64_RDI();
@@ -190,7 +190,7 @@
    (*arr)[16] = hregAMD64_XMM10();
    (*arr)[17] = hregAMD64_XMM11();
    (*arr)[18] = hregAMD64_XMM12();
-
+   (*arr)[19] = hregAMD64_R10();
 #endif
 }