Consistently use k{InstructionSet}WordSize.

These constants were defined before k{InstructionSet}PointerSize, so as
a first step use them consistently in the optimizing compiler. Whether
they should be removed altogether can be discussed in a second step.
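
For reference, a minimal sketch of why the substitution below is
value-preserving (illustrative definitions only, not the actual ART
headers): on arm64 both constants name the same 8-byte size.

    // Illustrative sketch; the real constants are declared in the ART
    // runtime/compiler headers and may differ in form.
    static constexpr size_t kArm64WordSize = 8;     // predates kArm64PointerSize
    static constexpr size_t kArm64PointerSize = 8;  // same value on arm64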

Change-Id: If129de1a3bb8b65f8d9c816a8ad466815fb202e6
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 2c586a1..82dced5 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -1393,7 +1393,7 @@
           (invoke->GetImtIndex() % mirror::Class::kImtSize) * sizeof(mirror::Class::ImTableEntry);
   Location receiver = invoke->GetLocations()->InAt(0);
   Offset class_offset = mirror::Object::ClassOffset();
-  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
+  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
 
   // The register ip1 is required to be used for the hidden argument in
   // art_quick_imt_conflict_trampoline, so prevent VIXL from using it.
@@ -1450,7 +1450,7 @@
   // lr = temp->entry_point_from_quick_compiled_code_;
   __ Ldr(lr, MemOperand(temp.X(),
                         mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
-                            kArm64PointerSize).SizeValue()));
+                            kArm64WordSize).SizeValue()));
   // lr();
   __ Blr(lr);
 
@@ -1465,7 +1465,7 @@
   size_t method_offset = mirror::Class::EmbeddedVTableOffset().SizeValue() +
     invoke->GetVTableIndex() * sizeof(mirror::Class::VTableEntry);
   Offset class_offset = mirror::Object::ClassOffset();
-  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64PointerSize);
+  Offset entry_point = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(kArm64WordSize);
 
   // temp = object->GetClass();
   if (receiver.IsStackSlot()) {