Bug 345248 - add support for Solaris OS in valgrind

Authors of this port:
    Petr Pavlu         setup@dagobah.cz
    Ivo Raisr          ivosh@ivosh.net
    Theo Schlossnagle  theo@omniti.com
            


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@15426 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/coregrind/Makefile.am b/coregrind/Makefile.am
index a5491ea..934d99e 100644
--- a/coregrind/Makefile.am
+++ b/coregrind/Makefile.am
@@ -44,6 +44,11 @@
 	launcher-darwin.c \
 	m_debuglog.c
 endif
+if VGCONF_OS_IS_SOLARIS
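+# Solaris reuses the Linux launcher sources; launcher-linux.c gains
+# VGO_solaris handling elsewhere in this change.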
+valgrind_SOURCES = \
+	launcher-linux.c \
+	m_debuglog.c
+endif
 
 valgrind_CPPFLAGS  = $(AM_CPPFLAGS_PRI)
 valgrind_CFLAGS    = $(AM_CFLAGS_PRI)
@@ -73,6 +78,9 @@
 # having access to Darwin, 'none' implementation is used.
 vgdb_SOURCES += vgdb-invoker-none.c
 endif
+if VGCONF_OS_IS_SOLARIS
+vgdb_SOURCES += vgdb-invoker-solaris.c
+endif
 
 vgdb_CPPFLAGS  = $(AM_CPPFLAGS_PRI)
 vgdb_CFLAGS    = $(AM_CFLAGS_PRI)
@@ -81,9 +89,13 @@
 if VGCONF_PLATVARIANT_IS_ANDROID
 vgdb_CFLAGS    += -static
 endif
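+# On Solaris the socket functions vgdb needs live in libsocket; threads are
+# in libc, so -lpthread is not required.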
+if VGCONF_OS_IS_SOLARIS
+vgdb_LDADD     = -lsocket
+else
 if !VGCONF_PLATVARIANT_IS_ANDROID
 vgdb_LDADD     = -lpthread
 endif
+endif
 if VGCONF_PLATFORMS_INCLUDE_X86_DARWIN
 vgdb_LDFLAGS   += -Wl,-read_only_relocs -Wl,suppress
 endif
@@ -249,6 +261,7 @@
 	m_syswrap/priv_syswrap-linux.h \
 	m_syswrap/priv_syswrap-linux-variants.h \
 	m_syswrap/priv_syswrap-darwin.h \
+	m_syswrap/priv_syswrap-solaris.h \
 	m_syswrap/priv_syswrap-main.h \
 	m_syswrap/priv_syswrap-xen.h \
 	m_ume/priv_ume.h \
@@ -316,6 +329,7 @@
 	m_aspacemgr/aspacemgr-segnames.c \
 	m_coredump/coredump-elf.c \
 	m_coredump/coredump-macho.c \
+	m_coredump/coredump-solaris.c \
 	m_debuginfo/misc.c \
 	m_debuginfo/d3basics.c \
 	m_debuginfo/debuginfo.c \
@@ -347,6 +361,8 @@
 	m_dispatch/dispatch-tilegx-linux.S \
 	m_dispatch/dispatch-x86-darwin.S \
 	m_dispatch/dispatch-amd64-darwin.S \
+	m_dispatch/dispatch-x86-solaris.S \
+	m_dispatch/dispatch-amd64-solaris.S \
 	m_gdbserver/inferiors.c \
 	m_gdbserver/m_gdbserver.c \
 	m_gdbserver/regcache.c \
@@ -368,6 +384,7 @@
 	m_gdbserver/version.c \
 	m_initimg/initimg-linux.c \
 	m_initimg/initimg-darwin.c \
+	m_initimg/initimg-solaris.c \
 	m_initimg/initimg-pathscan.c \
 	m_mach/mach_basics.c \
 	m_mach/mach_msg.c \
@@ -391,6 +408,7 @@
 	m_sigframe/sigframe-tilegx-linux.c \
 	m_sigframe/sigframe-x86-darwin.c \
 	m_sigframe/sigframe-amd64-darwin.c \
+	m_sigframe/sigframe-solaris.c \
 	m_syswrap/syscall-x86-linux.S \
 	m_syswrap/syscall-amd64-linux.S \
 	m_syswrap/syscall-ppc32-linux.S \
@@ -404,11 +422,14 @@
 	m_syswrap/syscall-tilegx-linux.S \
 	m_syswrap/syscall-x86-darwin.S \
 	m_syswrap/syscall-amd64-darwin.S \
+	m_syswrap/syscall-x86-solaris.S \
+	m_syswrap/syscall-amd64-solaris.S \
 	m_syswrap/syswrap-main.c \
 	m_syswrap/syswrap-generic.c \
 	m_syswrap/syswrap-linux.c \
 	m_syswrap/syswrap-linux-variants.c \
 	m_syswrap/syswrap-darwin.c \
+	m_syswrap/syswrap-solaris.c \
 	m_syswrap/syswrap-x86-linux.c \
 	m_syswrap/syswrap-amd64-linux.c \
 	m_syswrap/syswrap-ppc32-linux.c \
@@ -422,6 +443,8 @@
 	m_syswrap/syswrap-x86-darwin.c \
 	m_syswrap/syswrap-amd64-darwin.c \
 	m_syswrap/syswrap-xen.c \
+	m_syswrap/syswrap-x86-solaris.c \
+	m_syswrap/syswrap-amd64-solaris.c \
 	m_ume/elf.c \
 	m_ume/macho.c \
 	m_ume/main.c \
@@ -517,6 +540,17 @@
 	$(PRELOAD_LDFLAGS_@VGCONF_PLATFORM_SEC_CAPS@)
 endif
 
+if VGCONF_OS_IS_SOLARIS
+# Give the vgpreload_core library a proper soname so it can be easily
+# recognized when reading debug information.
+vgpreload_core_@VGCONF_ARCH_PRI@_@VGCONF_OS@_so_LDFLAGS += \
+	-Wl,-soname -Wl,vgpreload_core.so.0
+if VGCONF_HAVE_PLATFORM_SEC
+vgpreload_core_@VGCONF_ARCH_SEC@_@VGCONF_OS@_so_LDFLAGS += \
+	-Wl,-soname -Wl,vgpreload_core.so.0
+endif
+endif
+
 #----------------------------------------------------------------------------
 # gdbserver xml target descriptions
 #----------------------------------------------------------------------------
diff --git a/coregrind/launcher-linux.c b/coregrind/launcher-linux.c
index a5ebb02..ceeb729 100644
--- a/coregrind/launcher-linux.c
+++ b/coregrind/launcher-linux.c
@@ -203,6 +203,14 @@
          const Elf32_Ehdr *ehdr = (Elf32_Ehdr *)header;
 
          if (header[EI_DATA] == ELFDATA2LSB) {
+#           if defined(VGO_solaris)
+            if (ehdr->e_machine == EM_386 &&
+                (ehdr->e_ident[EI_OSABI] == ELFOSABI_SYSV ||
+                 ehdr->e_ident[EI_OSABI] == ELFOSABI_SOLARIS)) {
+               platform = "x86-solaris";
+            }
+            else
+#           endif
             if (ehdr->e_machine == EM_386 &&
                 (ehdr->e_ident[EI_OSABI] == ELFOSABI_SYSV ||
                  ehdr->e_ident[EI_OSABI] == ELFOSABI_LINUX)) {
@@ -239,6 +247,14 @@
          const Elf64_Ehdr *ehdr = (Elf64_Ehdr *)header;
 
          if (header[EI_DATA] == ELFDATA2LSB) {
+#           if defined(VGO_solaris)
+            if (ehdr->e_machine == EM_X86_64 &&
+                (ehdr->e_ident[EI_OSABI] == ELFOSABI_SYSV ||
+                 ehdr->e_ident[EI_OSABI] == ELFOSABI_SOLARIS)) {
+               platform = "amd64-solaris";
+            }
+            else
+#           endif
             if (ehdr->e_machine == EM_X86_64 &&
                 (ehdr->e_ident[EI_OSABI] == ELFOSABI_SYSV ||
                  ehdr->e_ident[EI_OSABI] == ELFOSABI_LINUX)) {
@@ -302,6 +318,7 @@
    const char *platform;
    const char *default_platform;
    const char *cp;
+   const char *linkname;
    char *toolfile;
    const char *launcher_name;
    char* new_line;
@@ -346,6 +363,7 @@
       typically it is the primary build target. Unless the primary build
       target is not built, in which case VG_PLATFORM is the
       secondary build target. */
+#  if defined(VGO_linux)
    if ((0==strcmp(VG_PLATFORM,"x86-linux"))    ||
        (0==strcmp(VG_PLATFORM,"amd64-linux"))  ||
        (0==strcmp(VG_PLATFORM,"ppc32-linux"))  ||
@@ -358,6 +376,13 @@
        (0==strcmp(VG_PLATFORM,"mips32-linux")) ||
        (0==strcmp(VG_PLATFORM,"mips64-linux")))
       default_platform = VG_PLATFORM;
+#  elif defined(VGO_solaris)
+   if ((0==strcmp(VG_PLATFORM,"x86-solaris")) ||
+       (0==strcmp(VG_PLATFORM,"amd64-solaris")))
+      default_platform = SOLARIS_LAUNCHER_DEFAULT_PLATFORM;
+#  else
+#    error Unknown OS
+#  endif
    else
       barf("Unknown VG_PLATFORM '%s'", VG_PLATFORM);
 
@@ -380,6 +405,13 @@
    /* Figure out the name of this executable (viz, the launcher), so
       we can tell stage2.  stage2 will use the name for recursive
       invocations of valgrind on child processes. */
+#  if defined(VGO_linux)
+   linkname = "/proc/self/exe";
+#  elif defined(VGO_solaris)
+   linkname = "/proc/self/path/a.out";
+#  else
+#    error Unknown OS
+#  endif
    unsigned bufsiz = 0;
    char *buf = NULL;
 
@@ -388,14 +420,14 @@
       buf = realloc(buf, bufsiz);
       if (buf == NULL)
          barf("realloc of buf failed.");
-      r = readlink("/proc/self/exe", buf, bufsiz);
+      r = readlink(linkname, buf, bufsiz);
       if (r == -1) {
-        /* If /proc/self/exe can't be followed, don't give up.  Instead
-           continue with an empty string for VALGRIND_LAUNCHER.  In the
-           sys_execve wrapper, this is tested, and if found to be empty,
+        /* If /proc/self/exe (/proc/self/path/a.out) can't be followed, don't
+           give up. Instead continue with an empty string for VALGRIND_LAUNCHER.
+           In the sys_execve wrapper, this is tested, and if found to be empty,
            fail the execve. */
         fprintf(stderr, "valgrind: warning (non-fatal): "
-                "readlink(\"/proc/self/exe\") failed.\n");
+                "readlink(\"%s\") failed.\n", linkname);
         fprintf(stderr, "valgrind: continuing, however --trace-children=yes "
                 "will not work.\n");
         launcher_name = "";
diff --git a/coregrind/link_tool_exe_solaris.in b/coregrind/link_tool_exe_solaris.in
new file mode 100644
index 0000000..795220f
--- /dev/null
+++ b/coregrind/link_tool_exe_solaris.in
@@ -0,0 +1,84 @@
+#! @PERL@
+
+# Generic information about the purpose of this script can be found in
+# link_tool_exe_linux.in.
+#
+# Solaris specific notes:
+#
+# - load address has to be specified in the mapfile, there is no command line
+#   option to achieve that
+#
+# - mapfile version 2 is used
+#
+# - information about the Solaris linker can be found in its man page
+#   (http://download.oracle.com/docs/cd/E19253-01/816-5165/ld-1/index.html)
+#   and in Oracle's Linker and Libraries Guide
+#   (http://download.oracle.com/docs/cd/E19963-01/html/819-0690/index.html)
+#
+
+use warnings;
+use strict;
+use File::Temp qw/tempfile unlink0/;
+use Fcntl qw/F_SETFD/;
+
+# expect at least: alt-load-address gcc -o foo bar.o
+die "Not enough arguments"
+    if (($#ARGV + 1) < 5);
+
+my $ala = $ARGV[0];
+
+# check for plausible-ish alt load address
+die "Bogus alt-load address"
+    if (length($ala) < 3 || index($ala, "0x") != 0);
+
+# the cc invocation to do the final link
+my $cc = $ARGV[1];
+
+# and the 'restargs' are argv[2 ..]
+
+# create a temporary mapfile
+(my $fh, my $path) = tempfile();
+
+# reset FD_CLOEXEC flag
+fcntl($fh, F_SETFD, 0)
+    or die "Can't clear close-on-exec flag on temp fh: $!";
+
+# safely unlink the file
+unlink0($fh, $path)
+    or die "Error unlinking file $path safely";
+undef $path;
+
+# fill it with data
+#
+# this is a bit tricky: the following condition has to be
+# true for both PT_LOAD segments:
+# (phdr->p_vaddr & PAGEOFFSET) == (phdr->p_offset & PAGEOFFSET)
+# if it doesn't hold then the kernel maps a segment as an anon mapping instead
+# of a file mapping (which, for example, breaks reading debug information)
+print $fh <<"END";
+\$mapfile_version 2
+LOAD_SEGMENT text { VADDR = $ala; ROUND = 0x1000 };
+LOAD_SEGMENT data { ROUND = 0x1000 };
+END
+
+# build up the complete command here:
+# 'cc' -Wl,-Mtmpfile 'restargs'
+
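+# the mapfile was unlinked above, so hand it to the linker through the
+# /proc/<pid>/fd/<n> path; clearing FD_CLOEXEC keeps that descriptor open
+# across the exec of the compiler/linker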
+my $cmd="$cc -Wl,-M/proc/$$/fd/" . fileno($fh);
+
+# add the rest of the parameters
+foreach my $n (2 .. $#ARGV) {
+    $cmd = "$cmd $ARGV[$n]";
+}
+
+#print "link_tool_exe_solaris: $cmd\n";
+
+
+# execute the command:
+my $r = system("$cmd");
+
+if ($r == 0) {
+    exit 0;
+} else {
+    exit 1;
+}
diff --git a/coregrind/m_addrinfo.c b/coregrind/m_addrinfo.c
index 46b4487..5ff0d28 100644
--- a/coregrind/m_addrinfo.c
+++ b/coregrind/m_addrinfo.c
@@ -265,7 +265,11 @@
 
       /* Special case to detect the brk data segment. */
       if (seg != NULL
+#if defined(VGO_solaris)
+          && (seg->kind == SkAnonC || seg->kind == SkFileC)
+#else
           && seg->kind == SkAnonC
+#endif /* VGO_solaris */
           && VG_(brk_limit) >= seg->start
           && VG_(brk_limit) <= seg->end+1) {
          /* Address a is in a Anon Client segment which contains
diff --git a/coregrind/m_aspacemgr/aspacemgr-common.c b/coregrind/m_aspacemgr/aspacemgr-common.c
index a235875..987f97a 100644
--- a/coregrind/m_aspacemgr/aspacemgr-common.c
+++ b/coregrind/m_aspacemgr/aspacemgr-common.c
@@ -174,6 +174,18 @@
    }
    res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length,
                           prot, flags, (UInt)fd, offset);
+#  elif defined(VGP_x86_solaris)
+   /* MAP_ANON with fd==0 is EINVAL. */
+   if (fd == 0 && (flags & VKI_MAP_ANONYMOUS))
+      fd = -1;
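+   /* 32-bit Solaris passes the 64-bit offset as two words (low, high). */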
+   res = VG_(do_syscall7)(__NR_mmap64, (UWord)start, length, prot, flags,
+                          (UInt)fd, offset & 0xffffffff, offset >> 32);
+#  elif defined(VGP_amd64_solaris)
+   /* MAP_ANON with fd==0 is EINVAL. */
+   if (fd == 0 && (flags & VKI_MAP_ANONYMOUS))
+      fd = -1;
+   res = VG_(do_syscall6)(__NR_mmap, (UWord)start, length, prot, flags,
+                          (UInt)fd, offset);
 #  else
 #    error Unknown platform
 #  endif
@@ -249,8 +261,13 @@
 #  elif defined(VGP_tilegx_linux)
    SysRes res = VG_(do_syscall4)(__NR_openat, VKI_AT_FDCWD, (UWord)pathname,
                                  flags, mode);
-#  else
+#  elif defined(VGO_linux) || defined(VGO_darwin)
    SysRes res = VG_(do_syscall3)(__NR_open, (UWord)pathname, flags, mode);
+#  elif defined(VGO_solaris)
+   SysRes res = VG_(do_syscall4)(__NR_openat, VKI_AT_FDCWD, (UWord)pathname,
+                                 flags, mode);
+#  else
+#    error Unknown OS
 #  endif
    return res;
 }
@@ -275,15 +292,20 @@
 #  elif defined(VGP_tilegx_linux)
    res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path,
                           (UWord)buf, bufsiz);
-#  else
+#  elif defined(VGO_linux) || defined(VGO_darwin)
    res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
+#  elif defined(VGO_solaris)
+   res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path,
+                          (UWord)buf, bufsiz);
+#  else
+#    error Unknown OS
 #  endif
    return sr_isError(res) ? -1 : sr_Res(res);
 }
 
 Int ML_(am_fcntl) ( Int fd, Int cmd, Addr arg )
 {
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
 #  elif defined(VGO_darwin)
    SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
@@ -299,6 +321,7 @@
                            /*OUT*/ULong* dev, 
                            /*OUT*/ULong* ino, /*OUT*/UInt* mode )
 {
+#  if defined(VGO_linux) || defined(VGO_darwin)
    SysRes          res;
    struct vki_stat buf;
 #  if defined(VGO_linux) && defined(__NR_fstat64)
@@ -322,6 +345,26 @@
       return True;
    }
    return False;
+#  elif defined(VGO_solaris)
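+   /* fstatat() with a NULL path operates on the fd itself on Solaris. */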
+#  if defined(VGP_x86_solaris)
+   struct vki_stat64 buf64;
+   SysRes res = VG_(do_syscall4)(__NR_fstatat64, fd, 0, (UWord)&buf64, 0);
+#  elif defined(VGP_amd64_solaris)
+   struct vki_stat buf64;
+   SysRes res = VG_(do_syscall4)(__NR_fstatat, fd, 0, (UWord)&buf64, 0);
+#  else
+#    error "Unknown platform"
+#  endif
+   if (!sr_isError(res)) {
+      *dev  = (ULong)buf64.st_dev;
+      *ino  = (ULong)buf64.st_ino;
+      *mode = (UInt) buf64.st_mode;
+      return True;
+   }
+   return False;
+#  else
+#    error Unknown OS
+#  endif
 }
 
 Bool ML_(am_resolve_filename) ( Int fd, /*OUT*/HChar* buf, Int nbuf )
@@ -347,6 +390,16 @@
    }
    return False;
 
+#elif defined(VGO_solaris)
+   Int i;
+   HChar tmp[64];
+   for (i = 0; i < nbuf; i++) buf[i] = 0;
+   ML_(am_sprintf)(tmp, "/proc/self/path/%d", fd);
+   if (ML_(am_readlink)(tmp, buf, nbuf) > 0 && buf[0] == '/')
+      return True;
+   else
+      return False;
+
 #  else
 #     error Unknown OS
 #  endif
diff --git a/coregrind/m_aspacemgr/aspacemgr-linux.c b/coregrind/m_aspacemgr/aspacemgr-linux.c
index 5ec5aee..9fe0dff 100644
--- a/coregrind/m_aspacemgr/aspacemgr-linux.c
+++ b/coregrind/m_aspacemgr/aspacemgr-linux.c
@@ -32,7 +32,7 @@
    The GNU General Public License is contained in the file COPYING.
 */
 
-#if defined(VGO_linux) || defined(VGO_darwin)
+#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 /* *************************************************************
    DO NOT INCLUDE ANY OTHER FILES HERE.
@@ -100,6 +100,15 @@
      clustered towards the start of available space, and Valgrind ones
      in the middle.
 
+     On Solaris, searches for client space start at (aspacem_vStart - 1)
+     and searches for Valgrind space start at (aspacem_maxAddr - 1), both
+     going backwards. This simulates what the kernel does - the brk limit
+     grows from the bottom and mmap'ed objects from the top. This is in
+     contrast to Linux, where the data segment and mmap'ed objects both grow
+     from the bottom (leading to early data segment exhaustion for tools which
+     do not use m_replacemalloc). While Linux glibc can cope with this problem
+     by falling back to mmap, Solaris libc treats the inability to grow the
+     brk limit as a hard failure.
+
      The available space is delimited by aspacem_minAddr and
      aspacem_maxAddr.  aspacem is flexible and can operate with these
      at any (sane) setting.  For 32-bit Linux, aspacem_minAddr is set
@@ -297,14 +306,17 @@
 
 
 Addr VG_(clo_aspacem_minAddr)
-#if defined(VGO_darwin)
+#if defined(VGO_linux)
+   = (Addr) 0x04000000; // 64M
+#elif defined(VGO_darwin)
 # if VG_WORDSIZE == 4
    = (Addr) 0x00001000;
 # else
    = (Addr) 0x100000000;  // 4GB page zero
 # endif
+#elif defined(VGO_solaris)
+   = (Addr) 0x00100000; // 1MB
 #else
-   = (Addr) 0x04000000; // 64M
 #endif
 
 
@@ -1122,6 +1134,18 @@
       return &nsegments[i];
 }
 
+/* Finds an anonymous segment containing 'a'. Returned pointer is read only. */
+NSegment const *VG_(am_find_anon_segment) ( Addr a )
+{
+   Int i = find_nsegment_idx(a);
+   aspacem_assert(i >= 0 && i < nsegments_used);
+   aspacem_assert(nsegments[i].start <= a);
+   aspacem_assert(a <= nsegments[i].end);
+   if (nsegments[i].kind == SkAnonC || nsegments[i].kind == SkAnonV)
+      return &nsegments[i];
+   else
+      return NULL;
+}
 
 /* Map segment pointer to segment index. */
 static Int segAddr_to_index ( const NSegment* seg )
@@ -1593,7 +1617,96 @@
 
    suggested_clstack_end = -1; // ignored; Mach-O specifies its stack
 
-#else /* !defined(VGO_darwin) */
+#elif defined(VGO_solaris)
+#  if VG_WORDSIZE == 4
+   /*
+      Intended address space partitioning:
+
+      ,--------------------------------, 0x00000000
+      |                                |
+      |--------------------------------|
+      | initial stack given to V by OS |
+      |--------------------------------| 0x08000000
+      |          client text           |
+      |--------------------------------|
+      |                                |
+      |                                |
+      |--------------------------------|
+      |          client stack          |
+      |--------------------------------| 0x38000000
+      |            V's text            |
+      |--------------------------------|
+      |                                |
+      |                                |
+      |--------------------------------|
+      |     dynamic shared objects     |
+      '--------------------------------' 0xffffffff
+
+      */
+
+   /* Anonymous pages need to fit under user limit (USERLIMIT32)
+      which is 4KB + 16MB below the top of the 32-bit range. */
+#    ifdef ENABLE_INNER
+     aspacem_maxAddr = (Addr)0x4fffffff; // 1.25GB
+     aspacem_vStart  = (Addr)0x40000000; // 1GB
+#    else
+     aspacem_maxAddr = (Addr)0xfefff000 - 1; // 4GB - 16MB - 4KB
+     aspacem_vStart  = (Addr)0x50000000; // 1.25GB
+#    endif
+#  elif VG_WORDSIZE == 8
+   /*
+      Intended address space partitioning:
+
+      ,--------------------------------, 0x00000000_00000000
+      |                                |
+      |--------------------------------| 0x00000000_00400000
+      |          client text           |
+      |--------------------------------|
+      |                                |
+      |                                |
+      |--------------------------------|
+      |          client stack          |
+      |--------------------------------| 0x00000000_38000000
+      |            V's text            |
+      |--------------------------------|
+      |                                |
+      |--------------------------------|
+      |     dynamic shared objects     |
+      |--------------------------------| 0x0000000f_ffffffff
+      |                                |
+      |                                |
+      |--------------------------------|
+      | initial stack given to V by OS |
+      '--------------------------------' 0xffffffff_ffffffff
+
+      */
+
+   /* The kernel likes to place objects at the end of the address space.
+      However, accessing memory beyond 64GB makes memcheck slow
+      (see memcheck/mc_main.c, internal representation). Therefore:
+      - the mmapobj() syscall is emulated so that libraries are subject to
+        Valgrind's aspacemgr control
+      - kernel shared pages (such as schedctl and hrt) are left as they are
+        because the kernel cannot be told where to put them */
+#    ifdef ENABLE_INNER
+     aspacem_maxAddr = (Addr) 0x00000007ffffffff; // 32GB
+     aspacem_vStart  = (Addr) 0x0000000400000000; // 16GB
+#    else
+     aspacem_maxAddr = (Addr) 0x0000000fffffffff; // 64GB
+     aspacem_vStart  = (Addr) 0x0000000800000000; // 32GB
+#    endif
+#  else
+#    error "Unknown word size"
+#  endif
+
+   aspacem_cStart = aspacem_minAddr;
+#  ifdef ENABLE_INNER
+   suggested_clstack_end = (Addr) 0x27ff0000 - 1; // 64kB below V's text
+#  else
+   suggested_clstack_end = (Addr) 0x37ff0000 - 1; // 64kB below V's text
+#  endif
+
+#else
 
    /* Establish address limits and block out unusable parts
       accordingly. */
@@ -1624,7 +1737,7 @@
    suggested_clstack_end = aspacem_maxAddr - 16*1024*1024ULL
                                            + VKI_PAGE_SIZE;
 
-#endif /* #else of 'defined(VGO_darwin)' */
+#endif
 
    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_minAddr));
    aspacem_assert(VG_IS_PAGE_ALIGNED(aspacem_maxAddr + 1));
@@ -1749,9 +1862,13 @@
    Addr holeStart, holeEnd, holeLen;
    Bool fixed_not_required;
 
+#if defined(VGO_solaris)
+   Addr startPoint = forClient ? aspacem_vStart - 1 : aspacem_maxAddr - 1;
+#else
    Addr startPoint = forClient ? aspacem_cStart : aspacem_vStart;
+#endif /* VGO_solaris */
 
-   Addr reqStart = req->rkind==MAny ? 0 : req->start;
+   Addr reqStart = req->rkind==MFixed || req->rkind==MHint ? req->start : 0;
    Addr reqEnd   = reqStart + req->len - 1;
    Addr reqLen   = req->len;
 
@@ -1834,18 +1951,39 @@
    /* ------ Implement the Default Policy ------ */
 
    /* Don't waste time looking for a fixed match if not requested to. */
-   fixed_not_required = req->rkind == MAny;
+   fixed_not_required = req->rkind == MAny || req->rkind == MAlign;
 
    i = find_nsegment_idx(startPoint);
 
+#if defined(VGO_solaris)
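+/* On Solaris, walk the segment array backwards and advise addresses from the
+   top of a hole downwards; see the description at the top of this file. */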
+#  define UPDATE_INDEX(index)                               \
+      (index)--;                                            \
+      if ((index) <= 0)                                     \
+         (index) = nsegments_used - 1;
+#  define ADVISE_ADDRESS(segment)                           \
+       VG_PGROUNDDN((segment)->end + 1 - reqLen)
+#  define ADVISE_ADDRESS_ALIGNED(segment)                   \
+        VG_ROUNDDN((segment)->end + 1 - reqLen, req->start)
+
+#else
+
+#  define UPDATE_INDEX(index)                               \
+      (index)++;                                            \
+      if ((index) >= nsegments_used)                        \
+         (index) = 0;
+#  define ADVISE_ADDRESS(segment)                           \
+      (segment)->start
+#  define ADVISE_ADDRESS_ALIGNED(segment)                   \
+      VG_ROUNDUP((segment)->start, req->start)
+#endif /* VGO_solaris */
+
    /* Examine holes from index i back round to i-1.  Record the
       index first fixed hole and the first floating hole which would
       satisfy the request. */
    for (j = 0; j < nsegments_used; j++) {
 
       if (nsegments[i].kind != SkFree) {
-         i++;
-         if (i >= nsegments_used) i = 0;
+         UPDATE_INDEX(i);
          continue;
       }
 
@@ -1857,6 +1995,15 @@
       aspacem_assert(aspacem_minAddr <= holeStart);
       aspacem_assert(holeEnd <= aspacem_maxAddr);
 
+      if (req->rkind == MAlign) {
+         holeStart = VG_ROUNDUP(holeStart, req->start);
+         if (holeStart >= holeEnd) {
+            /* This hole can't be used. */
+            UPDATE_INDEX(i);
+            continue;
+         }
+      }
+
       /* See if it's any use to us. */
       holeLen = holeEnd - holeStart + 1;
 
@@ -1870,8 +2017,7 @@
       if ((fixed_not_required || fixedIdx >= 0) && floatIdx >= 0)
          break;
 
-      i++;
-      if (i >= nsegments_used) i = 0;
+      UPDATE_INDEX(i);
    }
 
    aspacem_assert(fixedIdx >= -1 && fixedIdx < nsegments_used);
@@ -1902,14 +2048,21 @@
          }
          if (floatIdx >= 0) {
             *ok = True;
-            return nsegments[floatIdx].start;
+            return ADVISE_ADDRESS(&nsegments[floatIdx]);
          }
          *ok = False;
          return 0;
       case MAny:
          if (floatIdx >= 0) {
             *ok = True;
-            return nsegments[floatIdx].start;
+            return ADVISE_ADDRESS(&nsegments[floatIdx]);
+         }
+         *ok = False;
+         return 0;
+      case MAlign:
+         if (floatIdx >= 0) {
+            *ok = True;
+            return ADVISE_ADDRESS_ALIGNED(&nsegments[floatIdx]);
          }
          *ok = False;
          return 0;
@@ -1921,6 +2074,10 @@
    ML_(am_barf)("getAdvisory: unknown request kind");
    *ok = False;
    return 0;
+
+#undef UPDATE_INDEX
+#undef ADVISE_ADDRESS
+#undef ADVISE_ADDRESS_ALIGNED
 }
 
 /* Convenience wrapper for VG_(am_get_advisory) for client floating or
@@ -2158,12 +2315,30 @@
 SysRes VG_(am_mmap_file_fixed_client)
      ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset )
 {
-   return VG_(am_mmap_named_file_fixed_client)(start, length, prot, fd, offset, NULL);
+   UInt flags = VKI_MAP_FIXED | VKI_MAP_PRIVATE;
+   return VG_(am_mmap_named_file_fixed_client_flags)(start, length, prot, flags,
+                                                     fd, offset, NULL);
+}
+
+SysRes VG_(am_mmap_file_fixed_client_flags)
+     ( Addr start, SizeT length, UInt prot, UInt flags, Int fd, Off64T offset )
+{
+   return VG_(am_mmap_named_file_fixed_client_flags)(start, length, prot, flags,
+                                                     fd, offset, NULL);
 }
 
 SysRes VG_(am_mmap_named_file_fixed_client)
      ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset, const HChar *name )
 {
+   UInt flags = VKI_MAP_FIXED | VKI_MAP_PRIVATE;
+   return VG_(am_mmap_named_file_fixed_client_flags)(start, length, prot, flags,
+                                                     fd, offset, name);
+}
+
+SysRes VG_(am_mmap_named_file_fixed_client_flags)
+     ( Addr start, SizeT length, UInt prot, UInt flags,
+       Int fd, Off64T offset, const HChar *name )
+{
    SysRes     sres;
    NSegment   seg;
    Addr       advised;
@@ -2192,8 +2367,7 @@
       any resulting failure immediately. */
    // DDD: #warning GrP fixme MAP_FIXED can clobber memory!
    sres = VG_(am_do_mmap_NO_NOTIFY)( 
-             start, length, prot, 
-             VKI_MAP_FIXED|VKI_MAP_PRIVATE, 
+             start, length, prot, flags,
              fd, offset 
           );
    if (sr_isError(sres))
@@ -3603,7 +3777,274 @@
 
 /*------END-procmaps-parser-for-Darwin---------------------------*/
 
-#endif // defined(VGO_linux) || defined(VGO_darwin)
+/*------BEGIN-procmaps-parser-for-Solaris------------------------*/
+
+#if defined(VGO_solaris)
+
+/* Note: /proc/self/xmap contains extended information about already
+   materialized mappings whereas /proc/self/rmap contains information about
+   all mappings including reserved but yet-to-materialize mappings (mmap'ed
+   with the MAP_NORESERVE flag, such as thread stacks). But /proc/self/rmap
+   does not contain the extended information found in /proc/self/xmap.
+   Therefore information from both sources needs to be combined.
+ */
+
+typedef struct
+{
+   Addr   addr;
+   SizeT  size;
+   UInt   prot;
+   ULong  dev;
+   ULong  ino;
+   Off64T foffset;
+   HChar  filename[VKI_PATH_MAX];
+} Mapping;
+
+static SizeT read_proc_file(const HChar *filename, HChar *buf,
+                            SizeT buf_size, const HChar *buf_size_name,
+                            SizeT entry_size)
+{
+   SysRes res = ML_(am_open)(filename, VKI_O_RDONLY, 0);
+   if (sr_isError(res)) {
+      HChar message[100];
+      ML_(am_sprintf)(message, "Cannot open %s.", filename);
+      ML_(am_barf)(message);
+   }
+   Int fd = sr_Res(res);
+
+   Int r = ML_(am_read)(fd, buf, buf_size);
+   ML_(am_close)(fd);
+   if (r < 0) {
+      HChar message[100];
+      ML_(am_sprintf)(message, "I/O error on %s.", filename);
+      ML_(am_barf)(message);
+   }
+
+   if (r >= buf_size)
+      ML_(am_barf_toolow)(buf_size_name);
+
+   if (r % entry_size != 0) {
+      HChar message[100];
+      ML_(am_sprintf)(message, "Bogus values read from %s.", filename);
+      ML_(am_barf)(message);
+   }
+
+   return r / entry_size;
+}
+
+static Mapping *next_xmap(const HChar *buffer, SizeT entries, SizeT *idx,
+                          Mapping *mapping)
+{
+   aspacem_assert(idx);
+   aspacem_assert(mapping);
+
+   if (*idx >= entries)
+      return NULL; /* No more entries */
+
+   const vki_prxmap_t *map = (const vki_prxmap_t *)buffer + *idx;
+
+   mapping->addr = map->pr_vaddr;
+   mapping->size = map->pr_size;
+
+   mapping->prot = 0;
+   if (map->pr_mflags & VKI_MA_READ)
+      mapping->prot |= VKI_PROT_READ;
+   if (map->pr_mflags & VKI_MA_WRITE)
+      mapping->prot |= VKI_PROT_WRITE;
+   if (map->pr_mflags & VKI_MA_EXEC)
+      mapping->prot |= VKI_PROT_EXEC;
+
+   if (map->pr_dev != VKI_PRNODEV) {
+      mapping->dev = map->pr_dev;
+      mapping->ino = map->pr_ino;
+      mapping->foffset = map->pr_offset;
+   }
+   else {
+      mapping->dev = 0;
+      mapping->ino = 0;
+      mapping->foffset = 0;
+   }
+
+   /* Try to get the filename. */
+   mapping->filename[0] = '\0';
+   if (map->pr_mapname[0] != '\0') {
+      ML_(am_sprintf)(mapping->filename, "/proc/self/path/%s",
+                      map->pr_mapname);
+      Int r = ML_(am_readlink)(mapping->filename, mapping->filename,
+                               sizeof(mapping->filename) - 1);
+      if (r == -1) {
+         /* If Valgrind is executed in a non-global zone and the link in
+            /proc/self/path/ represents a file that is available through lofs
+            from a global zone then the kernel may not be able to resolve the
+            link.
+
+            In such a case, return a corresponding /proc/self/object/ file to
+            allow Valgrind to read the file if it is necessary.
+
+            This can create some discrepancy for the sanity check. For
+            instance, if a client program mmaps some file then the address
+            space manager will have a correct zone-local name of that file,
+            but the sanity check will receive a different file name from this
+            code. This currently does not represent a problem because the
+            sanity check ignores the file names (it uses device and inode
+            numbers for the comparison).
+          */
+         ML_(am_sprintf)(mapping->filename, "/proc/self/object/%s",
+                         map->pr_mapname);
+      }
+      else {
+         aspacem_assert(r >= 0);
+         mapping->filename[r] = '\0';
+      }
+   }
+
+   *idx += 1;
+   return mapping;
+}
+
+static Mapping *next_rmap(const HChar *buffer, SizeT entries, SizeT *idx,
+                          Mapping *mapping)
+{
+   aspacem_assert(idx);
+   aspacem_assert(mapping);
+
+   if (*idx >= entries)
+      return NULL; /* No more entries */
+
+   const vki_prmap_t *map = (const vki_prmap_t *)buffer + *idx;
+
+   mapping->addr = map->pr_vaddr;
+   mapping->size = map->pr_size;
+
+   mapping->prot = 0;
+   if (map->pr_mflags & VKI_MA_READ)
+      mapping->prot |= VKI_PROT_READ;
+   if (map->pr_mflags & VKI_MA_WRITE)
+      mapping->prot |= VKI_PROT_WRITE;
+   if (map->pr_mflags & VKI_MA_EXEC)
+      mapping->prot |= VKI_PROT_EXEC;
+
+   mapping->dev = 0;
+   mapping->ino = 0;
+   mapping->foffset = 0;
+   mapping->filename[0] = '\0';
+
+   *idx += 1;
+   return mapping;
+}
+
+/* Used for two purposes:
+   1. Establishing initial mappings at process startup
+   2. Checking mappings during the aspacemgr sanity check
+ */
+static void parse_procselfmaps (
+      void (*record_mapping)( Addr addr, SizeT len, UInt prot,
+                              ULong dev, ULong ino, Off64T offset,
+                              const HChar *filename ),
+      void (*record_gap)( Addr addr, SizeT len )
+   )
+{
+   Addr start = Addr_MIN;
+   Addr gap_start = Addr_MIN;
+
+#define M_XMAP_BUF (VG_N_SEGMENTS * sizeof(vki_prxmap_t))
+   /* Static to keep it out of stack frame... */
+   static HChar xmap_buf[M_XMAP_BUF];
+   const Mapping *xmap = NULL;
+   SizeT xmap_index = 0; /* Current entry */
+   SizeT xmap_entries;
+   Mapping xmap_mapping;
+   Bool advance_xmap;
+
+#define M_RMAP_BUF (VG_N_SEGMENTS * sizeof(vki_prmap_t))
+   static HChar rmap_buf[M_RMAP_BUF];
+   const Mapping *rmap = NULL;
+   SizeT rmap_index = 0; /* Current entry */
+   SizeT rmap_entries;
+   Mapping rmap_mapping;
+   Bool advance_rmap;
+
+   /* Read fully /proc/self/xmap and /proc/self/rmap. */
+   xmap_entries = read_proc_file("/proc/self/xmap", xmap_buf, M_XMAP_BUF,
+                                 "M_XMAP_BUF", sizeof(vki_prxmap_t));
+
+   rmap_entries = read_proc_file("/proc/self/rmap", rmap_buf, M_RMAP_BUF,
+                                 "M_RMAP_BUF", sizeof(vki_prmap_t));
+
+   /* Get the first xmap and rmap. */
+   advance_xmap = True;
+   advance_rmap = True;
+
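+   /* Walk both lists in address order. Each xmap entry lies within some rmap
+      entry; the rest of an rmap entry is reserved-but-unmaterialized space,
+      and anything outside the rmap entries is a gap. */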
+   while (1) {
+      /* Get next xmap or rmap if necessary. */
+      if (advance_xmap) {
+         xmap = next_xmap(xmap_buf, xmap_entries, &xmap_index, &xmap_mapping);
+         advance_xmap = False;
+      }
+      if (advance_rmap) {
+         rmap = next_rmap(rmap_buf, rmap_entries, &rmap_index, &rmap_mapping);
+         advance_rmap = False;
+      }
+
+      /* Check if the end has been reached. */
+      if (rmap == NULL)
+         break;
+
+      /* Invariants */
+      if (xmap != NULL) {
+         aspacem_assert(start <= xmap->addr);
+         aspacem_assert(rmap->addr <= xmap->addr);
+      }
+
+      if (xmap != NULL && start == xmap->addr) {
+         /* xmap mapping reached. */
+         aspacem_assert(xmap->addr >= rmap->addr &&
+                        xmap->addr + xmap->size <= rmap->addr + rmap->size);
+         aspacem_assert(xmap->prot == rmap->prot);
+
+         if (record_mapping != NULL)
+            (*record_mapping)(xmap->addr, xmap->size, xmap->prot, xmap->dev,
+                              xmap->ino, xmap->foffset,
+                              (xmap->filename[0] != '\0') ?
+                               xmap->filename : NULL);
+
+         start = xmap->addr + xmap->size;
+         advance_xmap = True;
+      }
+      else if (start >= rmap->addr) {
+         /* Reserved-only part. */
+         /* First calculate size until the end of this reserved mapping... */
+         SizeT size = rmap->addr + rmap->size - start;
+         /* ... but shrink it if some xmap is in the way. */
+         if (xmap != NULL && size > xmap->addr - start)
+            size = xmap->addr - start;
+
+         if (record_mapping != NULL)
+            (*record_mapping)(start, size, rmap->prot, 0, 0, 0, NULL);
+         start += size;
+      }
+      else {
+         /* Gap. */
+         if (record_gap != NULL && gap_start < start)
+            (*record_gap)(gap_start, start - gap_start);
+         start = rmap->addr;
+      }
+
+      if (rmap->addr + rmap->size <= start)
+         advance_rmap = True;
+
+      gap_start = start;
+   }
+
+   if (record_gap != NULL && gap_start < Addr_MAX)
+      (*record_gap)(gap_start, Addr_MAX - gap_start + 1);
+}
+
+#endif // defined(VGO_solaris)
+
+/*------END-procmaps-parser-for-Solaris--------------------------*/
+
+#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 /*--------------------------------------------------------------------*/
 /*--- end                                                          ---*/
diff --git a/coregrind/m_clientstate.c b/coregrind/m_clientstate.c
index e1a6040..35aee35 100644
--- a/coregrind/m_clientstate.c
+++ b/coregrind/m_clientstate.c
@@ -51,9 +51,13 @@
 /* Initial highest address of the stack segment of the main thread. */
 Addr  VG_(clstk_end)   = 0;
 UWord VG_(clstk_id)    = 0;
+/* Maximum size of the main thread's client stack. */
+SizeT VG_(clstk_max_size) = 0;
 
-/* linux only: where is the client auxv ? */
-/* This is set up as part of setup_client_stack in initimg-linux.c. */
+/* Linux and Solaris only: specifies where the client auxv is.
+
+   This is set up as part of setup_client_stack() in
+   initimg-{linux,solaris}.c. */
 UWord* VG_(client_auxv) = NULL;
 
 Addr  VG_(brk_base)    = 0;       /* start of brk */
@@ -68,6 +72,11 @@
 /* A fd which refers to the fake /proc/<pid>/auxv in /tmp. */
 Int VG_(cl_auxv_fd) = -1;
 
+#if defined(VGO_solaris)
+/* A fd which refers to the fake /proc/<pid>/psinfo in /tmp. */
+Int VG_(cl_psinfo_fd) = -1;
+#endif /* VGO_solaris */
+
 // Command line pieces, after they have been extracted from argv in
 // m_main.main().  The payload vectors are allocated in VG_AR_CORE
 // (the default arena).  They are never freed.
@@ -113,6 +122,12 @@
    in nptl/allocatestack.c */
 SizeT* VG_(client__stack_cache_actsize__addr) = 0;
 
+#if defined(VGO_solaris)
+/* Address of variable vg_vfork_fildes in vgpreload_core.so.0
+   (vg_preloaded.c). */
+Int* VG_(vfork_fildes_addr) = 0;
+#endif
+
 /*--------------------------------------------------------------------*/
 /*--- end                                                          ---*/
 /*--------------------------------------------------------------------*/
diff --git a/coregrind/m_coredump/coredump-elf.c b/coregrind/m_coredump/coredump-elf.c
index 038b771..8b7871e 100644
--- a/coregrind/m_coredump/coredump-elf.c
+++ b/coregrind/m_coredump/coredump-elf.c
@@ -194,8 +194,6 @@
 static void fill_prpsinfo(const ThreadState *tst,
                           struct vki_elf_prpsinfo *prpsinfo)
 {
-   const HChar *name;
-
    VG_(memset)(prpsinfo, 0, sizeof(*prpsinfo));
 
    switch(tst->status) {
@@ -221,16 +219,7 @@
    prpsinfo->pr_uid = 0;
    prpsinfo->pr_gid = 0;
    
-   if (VG_(resolve_filename)(VG_(cl_exec_fd), &name)) {
-      const HChar *n = name + VG_(strlen)(name) - 1;
-
-      while (n > name && *n != '/')
-	 n--;
-      if (n != name)
-	 n++;
-
-      VG_(strncpy)(prpsinfo->pr_fname, n, sizeof(prpsinfo->pr_fname));
-   }
+   VG_(client_fname)(prpsinfo->pr_fname, sizeof(prpsinfo->pr_fname), False);
 }
 
 static void fill_prstatus(const ThreadState *tst, 
diff --git a/coregrind/m_coredump/coredump-solaris.c b/coregrind/m_coredump/coredump-solaris.c
new file mode 100644
index 0000000..9b6473c
--- /dev/null
+++ b/coregrind/m_coredump/coredump-solaris.c
@@ -0,0 +1,1110 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Dumping core on Solaris.                  coredump-solaris.c ---*/
+/*--------------------------------------------------------------------*/
+ 
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2013-2014 Ivo Raisr
+      ivosh@ivosh.net
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGO_solaris)
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_aspacehl.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_coredump.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcfile.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_machine.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_options.h"
+#include "pub_core_syscall.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h"
+
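+/* Single ELF note, kept on a linked list until the complete layout of the
+   core file is known. */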
+typedef struct __attribute__ ((__packed__)) note {
+   struct note *next;
+   VKI_ESZ(Nhdr) nhdr;
+   HChar name[8];
+   HChar data[0];
+} note_t;
+
+static void add_note(note_t **list, UInt type, const void *data,
+                     UInt datasz);
+
+/* If true, then this Segment may be mentioned in the core */
+static Bool may_dump(const NSegment *seg)
+{
+   if ((seg->kind == SkAnonC) ||
+       (seg->kind == SkShmC) ||
+       ((seg->kind == SkFileC) &&
+        !VKI_S_ISCHR(seg->mode) && !VKI_S_ISBLK(seg->mode)))
+      return True;
+
+   return False;
+}
+
+/* If true, then this Segment's contents will be in the core */
+static Bool should_dump(const NSegment *seg)
+{
+   return may_dump(seg);
+}
+
+#if defined(SOLARIS_PRXREGSET_T)
+static Bool should_dump_xregs(const ThreadState *tst)
+{
+#if defined(VGP_x86_solaris)
+   return False;
+#elif defined(VGP_amd64_solaris)
+   const ThreadArchState *arch = (const ThreadArchState *) &tst->arch;
+
+   /* Dump the 256-bit wide %ymm registers only when their upper halves are
+      non-zero. */
+   #define YMM_NON_ZERO(reg) \
+      ((reg[4] != 0) || (reg[5] != 0) || (reg[6] != 0) || (reg[7] != 0))
+   if (YMM_NON_ZERO(arch->vex.guest_YMM0) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM1) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM2) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM3) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM4) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM5) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM6) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM7) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM8) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM9) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM10) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM11) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM12) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM13) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM14) ||
+       YMM_NON_ZERO(arch->vex.guest_YMM15))
+      return True;
+
+   return False;
+
+   #undef YMM_NON_ZERO
+#else
+#  error Unknown ELF platform
+#endif
+}
+#endif /* SOLARIS_PRXREGSET_T */
+
+static void write_part(Int fd, const HChar *filename,
+                       void *buf, SizeT buf_size, const HChar *part)
+{
+   Int ret = VG_(write)(fd, buf, buf_size);
+   if (ret < 0) {
+      VG_(umsg)("Failed to write %s to coredump file %s, it may be "
+                "incomplete.\n", part, filename);
+      VG_(debugLog)(1, "coredump-solaris", "write_part: failed to write "
+                    "%s to file %s. Buffer address=%p, length=%lu. "
+                    "Error=%d.\n", part, filename, buf, buf_size, -ret);
+   }
+}
+
+/*====================================================================*/
+/*=== Miscellaneous getters                                        ===*/
+/*====================================================================*/
+
+static Int get_uid(void)
+{
+   return sr_Res(VG_(do_syscall0)(SYS_getuid));
+}
+
+static Int get_gid(void)
+{
+   return sr_Res(VG_(do_syscall0)(SYS_getgid));
+}
+
+static Int get_dmodel(void)
+{
+#if defined(VGP_x86_solaris)
+   return PR_MODEL_ILP32;
+#elif defined(VGP_amd64_solaris)
+   return PR_MODEL_LP64;
+#else
+#  error "Unknown platform"
+#endif
+}
+
+static vki_zoneid_t get_zoneid(void)
+{
+   SysRes sres = VG_(do_syscall2)(SYS_zone, VKI_ZONE_LOOKUP,
+                                  (UWord) NULL);
+   if (sr_isError(sres))
+       return 0;
+
+   return sr_Res(sres);
+}
+
+static UInt count_auxv(void)
+{
+   UInt count = 1;
+
+   vki_auxv_t *auxv = (vki_auxv_t *) VG_(client_auxv);
+   while (auxv->a_type != VKI_AT_NULL) {
+      count += 1;
+      auxv++;
+   }
+
+   return count;
+}
+
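+/* Lowest address of the thread's client stack. */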
+static Addr compute_stkbase(const ThreadState *tst)
+{
+   return tst->client_stack_highest_byte + 1
+          - tst->client_stack_szB;
+}
+
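+/* Synthesize a wait(2)-style status: terminating signal plus the
+   "core dumped" flag. */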
+static Int get_wstat(const vki_siginfo_t *si)
+{
+   return (si->si_signo & 0xff) | WCOREFLG;
+}
+
+/*====================================================================*/
+/*=== Utility fillers                                              ===*/
+/*====================================================================*/
+
+static void fill_platform(HChar *buf, UInt buf_size)
+{
+   vg_assert(buf != NULL);
+   vg_assert(buf_size >= 1);
+
+   buf[0] = '\0';
+
+   VG_(do_syscall3)(SYS_systeminfo, VKI_SI_PLATFORM,
+                    (UWord) buf, buf_size);
+}
+
+static void fill_zonename(HChar *buf, UInt buf_size)
+{
+   vg_assert(buf != NULL);
+   vg_assert(buf_size >= 1);
+
+   buf[0] = '\0';
+
+   VG_(do_syscall5)(SYS_zone, VKI_ZONE_GETATTR, get_zoneid(),
+                    VKI_ZONE_ATTR_NAME, (UWord) buf, buf_size);
+}
+
+static void fill_thread_state(const ThreadState *tst,
+                              HChar *state, HChar *sname)
+{
+   switch (tst->status) {
+   case VgTs_Runnable:
+   case VgTs_Yielding:
+      *state = VKI_SRUN;
+      *sname = 'R';
+      break;
+
+   case VgTs_WaitSys:
+      *state = VKI_SSLEEP;
+      *sname = 'S';
+      break;
+
+   case VgTs_Zombie:
+      *state = VKI_SZOMB;
+      *sname = 'Z';
+      break;
+
+   case VgTs_Empty:
+   case VgTs_Init:
+      *state = 0;
+      *sname = '?';
+      break;
+   }
+}
+
+static void fill_siginfo(const vki_siginfo_t *si, vki_siginfo_t *di,
+                         Short *signo)
+{
+   di->si_signo = si->si_signo;
+   di->si_code  = si->si_code;
+   di->si_errno = 0;
+   di->si_addr = si->si_addr;
+   *signo = si->si_signo;
+}
+
+static void fill_argv(Int *argc, Addr *argv)
+{
+   Addr *ptr = (Addr *) VG_(get_initial_client_SP)();
+   *argc = *ptr++;
+   *argv = (Addr) ptr;
+}
+
+static void fill_scheduling_class(HChar *buf, SizeT buf_size)
+{
+   vg_assert(buf != NULL);
+   vg_assert(buf_size >= 1);
+
+   /* Valgrind currently schedules one thread at a time, which
+      resembles the default timeshare class. */
+   VG_(strncpy)(buf, "TS", buf_size);
+}
+
+static void fill_regset(vki_prgregset_t *regs, const ThreadState *tst)
+{
+   const ThreadArchState *arch = (const ThreadArchState *) &tst->arch;
+
+#if defined(VGP_x86_solaris)
+   (*regs)[VKI_EIP]        = arch->vex.guest_EIP;
+   (*regs)[VKI_EAX]        = arch->vex.guest_EAX;
+   (*regs)[VKI_EBX]        = arch->vex.guest_EBX;
+   (*regs)[VKI_ECX]        = arch->vex.guest_ECX;
+   (*regs)[VKI_EDX]        = arch->vex.guest_EDX;
+   (*regs)[VKI_ESI]        = arch->vex.guest_ESI;
+   (*regs)[VKI_EDI]        = arch->vex.guest_EDI;
+   (*regs)[VKI_EBP]        = arch->vex.guest_EBP;
+   (*regs)[VKI_UESP]       = arch->vex.guest_ESP;
+   (*regs)[VKI_SS]         = arch->vex.guest_SS;
+   (*regs)[VKI_CS]         = arch->vex.guest_CS;
+   (*regs)[VKI_DS]         = arch->vex.guest_DS;
+   (*regs)[VKI_ES]         = arch->vex.guest_ES;
+   (*regs)[VKI_FS]         = arch->vex.guest_FS;
+   (*regs)[VKI_GS]         = arch->vex.guest_GS;
+   (*regs)[VKI_EFL]        = LibVEX_GuestX86_get_eflags(&arch->vex);
+#elif defined(VGP_amd64_solaris)
+   (*regs)[VKI_REG_RIP]    = arch->vex.guest_RIP;
+   (*regs)[VKI_REG_RAX]    = arch->vex.guest_RAX;
+   (*regs)[VKI_REG_RBX]    = arch->vex.guest_RBX;
+   (*regs)[VKI_REG_RCX]    = arch->vex.guest_RCX;
+   (*regs)[VKI_REG_RDX]    = arch->vex.guest_RDX;
+   (*regs)[VKI_REG_RBP]    = arch->vex.guest_RBP;
+   (*regs)[VKI_REG_RSI]    = arch->vex.guest_RSI;
+   (*regs)[VKI_REG_RDI]    = arch->vex.guest_RDI;
+   (*regs)[VKI_REG_R8]     = arch->vex.guest_R8;
+   (*regs)[VKI_REG_R9]     = arch->vex.guest_R9;
+   (*regs)[VKI_REG_R10]    = arch->vex.guest_R10;
+   (*regs)[VKI_REG_R11]    = arch->vex.guest_R11;
+   (*regs)[VKI_REG_R12]    = arch->vex.guest_R12;
+   (*regs)[VKI_REG_R13]    = arch->vex.guest_R13;
+   (*regs)[VKI_REG_R14]    = arch->vex.guest_R14;
+   (*regs)[VKI_REG_R15]    = arch->vex.guest_R15;
+   (*regs)[VKI_REG_RSP]    = arch->vex.guest_RSP;
+   (*regs)[VKI_REG_CS]     = VKI_UCS_SEL;
+   (*regs)[VKI_REG_DS]     = 0;
+   (*regs)[VKI_REG_ES]     = 0;
+   (*regs)[VKI_REG_FS]     = 0;
+   (*regs)[VKI_REG_GS]     = 0;
+   (*regs)[VKI_REG_SS]     = VKI_UDS_SEL;
+   (*regs)[VKI_REG_FSBASE] = arch->vex.guest_FS_CONST;
+   (*regs)[VKI_REG_GSBASE] = 0;
+   (*regs)[VKI_REG_RFL]    = LibVEX_GuestAMD64_get_rflags(&arch->vex);
+#else
+#  error "Unknown platform"
+#endif
+}
+
+static void fill_fpregset(vki_fpregset_t *fpu, const ThreadState *tst)
+{
+   const ThreadArchState *arch = (const ThreadArchState *) &tst->arch;
+
+#if defined(VGP_x86_solaris)
+   VG_(memset)(fpu, 0, sizeof(*fpu));
+
+   struct vki_fpchip_state *fs = &fpu->fp_reg_set.fpchip_state;
+   vg_assert(sizeof(fs->state) == 108);
+
+   LibVEX_GuestX86_get_x87(CONST_CAST(VexGuestX86State *, &arch->vex),
+                           (UChar *) &fs->state);
+
+   /* SSE */
+   UInt mxcsr = LibVEX_GuestX86_get_mxcsr(CONST_CAST(VexGuestX86State *,
+                                                     &arch->vex));
+   fs->mxcsr = mxcsr;
+
+   /* XMM registers */
+   #define COPY_OUT_XMM(dest, src) \
+      do {                      \
+         dest._l[0] = src[0];   \
+         dest._l[1] = src[1];   \
+         dest._l[2] = src[2];   \
+         dest._l[3] = src[3];   \
+      } while (0);
+   COPY_OUT_XMM(fs->xmm[0], arch->vex.guest_XMM0);
+   COPY_OUT_XMM(fs->xmm[1], arch->vex.guest_XMM1);
+   COPY_OUT_XMM(fs->xmm[2], arch->vex.guest_XMM2);
+   COPY_OUT_XMM(fs->xmm[3], arch->vex.guest_XMM3);
+   COPY_OUT_XMM(fs->xmm[4], arch->vex.guest_XMM4);
+   COPY_OUT_XMM(fs->xmm[5], arch->vex.guest_XMM5);
+   COPY_OUT_XMM(fs->xmm[6], arch->vex.guest_XMM6);
+   COPY_OUT_XMM(fs->xmm[7], arch->vex.guest_XMM7);
+   #undef COPY_OUT_XMM
+#elif defined(VGP_amd64_solaris)
+   VG_(memset)(fpu, 0, sizeof(*fpu));
+   struct vki_fpchip_state *fs = &fpu->fp_reg_set.fpchip_state;
+
+   /* LibVEX_GuestAMD64_fxsave() requires at least 416 bytes. */
+   vg_assert(sizeof(*fs) >= 416);
+   LibVEX_GuestAMD64_fxsave(CONST_CAST(VexGuestAMD64State *, &arch->vex),
+                            (Addr) fs);
+#else
+#  error Unknown platform
+#endif
+}
+
+/*====================================================================*/
+/*=== Header fillers                                               ===*/
+/*====================================================================*/
+
+static void fill_ehdr(VKI_ESZ(Ehdr) *ehdr, Int num_phdrs)
+{
+   VG_(memset)(ehdr, 0, sizeof(*ehdr));
+
+   VG_(memcpy)(ehdr->e_ident, VKI_ELFMAG, VKI_SELFMAG);
+   ehdr->e_ident[VKI_EI_CLASS]   = VG_ELF_CLASS;
+   ehdr->e_ident[VKI_EI_DATA]    = VG_ELF_DATA2XXX;
+   ehdr->e_ident[VKI_EI_VERSION] = VKI_EV_CURRENT;
+
+   ehdr->e_type = VKI_ET_CORE;
+   ehdr->e_machine = VG_ELF_MACHINE;
+   ehdr->e_version = VKI_EV_CURRENT;
+   ehdr->e_entry = 0;
+   ehdr->e_flags = 0;
+   ehdr->e_ehsize = sizeof(VKI_ESZ(Ehdr));
+
+   ehdr->e_phoff = sizeof(VKI_ESZ(Ehdr));
+   ehdr->e_phentsize = sizeof(VKI_ESZ(Phdr));
+
+   /* If the count of program headers can't fit in the mere 16 bits
+    * shortsightedly allotted to them in the ELF header, we use the
+    * extended formats and put the real values in the section header
+    * at index 0.
+    */
+   if (num_phdrs >= VKI_PN_XNUM) {
+      ehdr->e_phnum = VKI_PN_XNUM;
+      ehdr->e_shnum = 1;
+      ehdr->e_shoff = ehdr->e_phoff + ehdr->e_phentsize * num_phdrs;
+      ehdr->e_shentsize = sizeof(VKI_ESZ(Shdr));
+   } else {
+      ehdr->e_phnum = num_phdrs;
+      ehdr->e_shnum = 0;
+      ehdr->e_shoff = 0;
+      ehdr->e_shentsize = 0;
+   }
+
+   ehdr->e_shstrndx = 0;
+}
+
+static void fill_phdr(VKI_ESZ(Phdr) *phdr, const NSegment *seg, UInt off,
+                      Bool really_write)
+{
+   SizeT len = seg->end - seg->start + 1;
+
+   really_write = really_write && should_dump(seg);
+
+   VG_(memset)(phdr, 0, sizeof(*phdr));
+
+   phdr->p_type = PT_LOAD;
+   phdr->p_offset = off;
+   phdr->p_vaddr = seg->start;
+   phdr->p_paddr = 0;
+   phdr->p_filesz = really_write ? len : 0;
+   phdr->p_memsz = len;
+   phdr->p_flags = 0;
+
+   if (seg->hasR)
+      phdr->p_flags |= PF_R;
+   if (seg->hasW)
+      phdr->p_flags |= PF_W;
+   if (seg->hasX)
+      phdr->p_flags |= PF_X;
+
+   phdr->p_align = VKI_PAGE_SIZE;
+}
+
+/* Fills the section header at index zero when num_phdrs >= PN_XNUM. */
+static void fill_zero_shdr(VKI_ESZ(Shdr) *shdr, UInt num_phdrs)
+{
+   vg_assert(num_phdrs >= VKI_PN_XNUM);
+
+   VG_(memset)(shdr, 0, sizeof(*shdr));
+
+   shdr->sh_name = 0; // STR_NONE
+   shdr->sh_info = num_phdrs;
+}
+
+static void fill_prpsinfo(vki_elf_prpsinfo_t *prpsinfo,
+                          const ThreadState *tst,
+                          const vki_siginfo_t *si)
+{
+   VG_(memset)(prpsinfo, 0, sizeof(*prpsinfo));
+
+   fill_thread_state(tst, &prpsinfo->pr_state, &prpsinfo->pr_sname);
+   prpsinfo->pr_uid = get_uid();
+   prpsinfo->pr_gid = get_gid();
+   prpsinfo->pr_pid = VG_(getpid)();
+   prpsinfo->pr_ppid = VG_(getppid)();
+   prpsinfo->pr_pgrp = VG_(getpgrp)();
+   prpsinfo->pr_sid = VG_(getpgrp)();
+   fill_scheduling_class(prpsinfo->pr_clname, sizeof(prpsinfo->pr_clname));
+   VG_(client_fname)(prpsinfo->pr_fname, sizeof(prpsinfo->pr_fname), True);
+   VG_(client_cmd_and_args)(prpsinfo->pr_psargs,
+                            sizeof(prpsinfo->pr_psargs));
+   fill_argv(&prpsinfo->pr_argc, (Addr *) &prpsinfo->pr_argv);
+   prpsinfo->pr_envp = (char **) VG_(client_envp);
+   prpsinfo->pr_wstat = get_wstat(si);
+   prpsinfo->pr_euid = VG_(geteuid)();
+   prpsinfo->pr_egid = VG_(getegid)();
+   prpsinfo->pr_dmodel = get_dmodel();
+}
+
+static void fill_prstatus(vki_elf_prstatus_t *prs,
+                          const ThreadState *tst,
+			  const vki_siginfo_t *si)
+{
+   VG_(memset)(prs, 0, sizeof(*prs));
+
+   prs->pr_flags = VKI_ELF_OLD_PR_PCINVAL;
+   fill_siginfo(si, &prs->pr_info, &prs->pr_cursig);
+   prs->pr_nlwp = VG_(count_living_threads)();
+   prs->pr_sighold = tst->sig_mask;
+   prs->pr_pid = VG_(getpid)();
+   prs->pr_ppid = VG_(getppid)();
+   prs->pr_pgrp = VG_(getpgrp)();
+   prs->pr_sid = VG_(getpgrp)();
+   fill_scheduling_class(prs->pr_clname, sizeof(prs->pr_clname));
+   prs->pr_who = tst->os_state.lwpid; 
+   prs->pr_brkbase = (vki_caddr_t) VG_(brk_base);
+   prs->pr_brksize = VG_(brk_limit) - VG_(brk_base);
+   prs->pr_stkbase = (vki_caddr_t) compute_stkbase(tst);
+   prs->pr_stksize = tst->client_stack_szB;
+   fill_regset(&prs->pr_reg, tst);
+}
+
+static void fill_psinfo(vki_psinfo_t *psinfo, const ThreadState *tst,
+                        const vki_siginfo_t *si)
+{
+   VG_(memset)(psinfo, 0, sizeof(*psinfo));
+
+   psinfo->pr_nlwp = VG_(count_living_threads)();
+   psinfo->pr_uid = get_uid();
+   psinfo->pr_gid = get_gid();
+   psinfo->pr_pid = VG_(getpid)();
+   psinfo->pr_ppid = VG_(getppid)();
+   psinfo->pr_pgid = VG_(getpgrp)();
+   psinfo->pr_sid = VG_(getpgrp)();
+   psinfo->pr_euid = VG_(geteuid)();
+   psinfo->pr_egid = VG_(getegid)();
+   VG_(client_fname)(psinfo->pr_fname, sizeof(psinfo->pr_fname), True);
+   psinfo->pr_wstat = get_wstat(si);
+   VG_(client_cmd_and_args)(psinfo->pr_psargs,
+                            sizeof(psinfo->pr_psargs));
+   fill_argv(&psinfo->pr_argc, (Addr *) &psinfo->pr_argv);
+   psinfo->pr_envp = (uintptr_t) VG_(client_envp);
+   psinfo->pr_dmodel = get_dmodel();
+   psinfo->pr_zoneid = get_zoneid();
+
+   psinfo->pr_lwp.pr_lwpid = tst->os_state.lwpid;
+   fill_thread_state(tst, &psinfo->pr_lwp.pr_state,
+                     &psinfo->pr_lwp.pr_sname);
+   fill_scheduling_class(psinfo->pr_lwp.pr_clname,
+                         sizeof(psinfo->pr_lwp.pr_clname));
+}
+
+static void fill_pstatus(vki_pstatus_t *pstatus,
+                         const ThreadState *tst,
+                         const vki_siginfo_t *si)
+{
+   VG_(memset)(pstatus, 0, sizeof(*pstatus));
+
+   pstatus->pr_flags = VKI_PR_PCINVAL;
+   pstatus->pr_nlwp = VG_(count_living_threads)();
+   pstatus->pr_pid = VG_(getpid)();
+   pstatus->pr_ppid = VG_(getppid)();
+   pstatus->pr_pgid = VG_(getpgrp)();
+   pstatus->pr_sid = VG_(getpgrp)();
+   pstatus->pr_brkbase = (uintptr_t) VG_(brk_base);
+   pstatus->pr_brksize = VG_(brk_limit) - VG_(brk_base);
+   pstatus->pr_stkbase = (uintptr_t) compute_stkbase(tst);
+   pstatus->pr_stksize = tst->client_stack_szB;
+   pstatus->pr_dmodel = get_dmodel();
+   pstatus->pr_zoneid = get_zoneid();
+
+   pstatus->pr_lwp.pr_flags = VKI_PR_PCINVAL;
+   pstatus->pr_lwp.pr_lwpid = tst->os_state.lwpid;
+   fill_siginfo(si, &pstatus->pr_lwp.pr_info,
+                &pstatus->pr_lwp.pr_cursig);
+   pstatus->pr_lwp.pr_lwphold = tst->sig_mask;
+   fill_scheduling_class(pstatus->pr_lwp.pr_clname,
+                         sizeof(pstatus->pr_lwp.pr_clname));
+   fill_regset(&pstatus->pr_lwp.pr_reg, tst);
+   fill_fpregset(&pstatus->pr_lwp.pr_fpreg, tst);
+}
+
+#if defined(SOLARIS_PRXREGSET_T)
+static void fill_xregs(vki_prxregset_t *xregs, const ThreadState *tst)
+{
+   const ThreadArchState *arch = (const ThreadArchState *) &tst->arch;
+
+#if defined(VGP_x86_solaris)
+   VG_(memset)(xregs, 0, sizeof(*xregs));
+   xregs->pr_xsize = sizeof(xregs->pr_un.pr_xsave);
+
+   /* SSE */
+   UInt mxcsr = LibVEX_GuestX86_get_mxcsr(CONST_CAST(VexGuestX86State *,
+                                                     &arch->vex));
+   xregs->pr_un.pr_xsave.pr_mxcsr = mxcsr;
+
+   /* XMM registers */
+   #define COPY_OUT_XMM(dest, src) \
+      do {                      \
+         dest._l[0] = src[0];   \
+         dest._l[1] = src[1];   \
+         dest._l[2] = src[2];   \
+         dest._l[3] = src[3];   \
+      } while (0)
+   COPY_OUT_XMM(xregs->pr_un.pr_xsave.pr_xmm[0], arch->vex.guest_XMM0);
+   COPY_OUT_XMM(xregs->pr_un.pr_xsave.pr_xmm[1], arch->vex.guest_XMM1);
+   COPY_OUT_XMM(xregs->pr_un.pr_xsave.pr_xmm[2], arch->vex.guest_XMM2);
+   COPY_OUT_XMM(xregs->pr_un.pr_xsave.pr_xmm[3], arch->vex.guest_XMM3);
+   COPY_OUT_XMM(xregs->pr_un.pr_xsave.pr_xmm[4], arch->vex.guest_XMM4);
+   COPY_OUT_XMM(xregs->pr_un.pr_xsave.pr_xmm[5], arch->vex.guest_XMM5);
+   COPY_OUT_XMM(xregs->pr_un.pr_xsave.pr_xmm[6], arch->vex.guest_XMM6);
+   COPY_OUT_XMM(xregs->pr_un.pr_xsave.pr_xmm[7], arch->vex.guest_XMM7);
+   #undef COPY_OUT_XMM
+
+#elif defined(VGP_amd64_solaris)
+   VG_(memset)(xregs, 0, sizeof(*xregs));
+   xregs->pr_xsize = sizeof(xregs->pr_un.pr_xsave);
+
+   /* LibVEX_GuestAMD64_fxsave() requires at least 416 bytes. */
+   vg_assert(sizeof(xregs->pr_un.pr_xsave) >= 416);
+   LibVEX_GuestAMD64_fxsave(CONST_CAST(VexGuestAMD64State *, &arch->vex),
+                            (Addr) &xregs->pr_un.pr_xsave);
+#else
+#  error "Unknown platform"
+#endif
+}
+#endif /* SOLARIS_PRXREGSET_T */
+
+static void fill_utsname(struct vki_utsname *uts)
+{
+   VG_(memset)(uts, 0, sizeof(*uts));
+
+   VG_(do_syscall3)(SYS_systeminfo, VKI_SI_SYSNAME,
+                    (UWord) &uts->sysname, sizeof(uts->sysname));
+   VG_(do_syscall3)(SYS_systeminfo, VKI_SI_HOSTNAME,
+                    (UWord) &uts->nodename, sizeof(uts->nodename));
+   VG_(do_syscall3)(SYS_systeminfo, VKI_SI_RELEASE,
+                    (UWord) &uts->release, sizeof(uts->release));
+   VG_(do_syscall3)(SYS_systeminfo, VKI_SI_VERSION,
+                    (UWord) &uts->version, sizeof(uts->version));
+   VG_(do_syscall3)(SYS_systeminfo, VKI_SI_MACHINE,
+                    (UWord) &uts->machine, sizeof(uts->machine));
+}
+
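+/* Build the credentials note.  The returned buffer is malloc'd and sized to
+   hold the variable-length group list that follows the fixed part. */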
+static vki_prcred_t *create_prcred(SizeT *size)
+{
+   UInt group_list[VKI_NGROUPS_MAX];
+   Int ngroups = VG_(getgroups)(VKI_NGROUPS_MAX, group_list);
+   if (ngroups == -1)
+      ngroups = 0;
+
+   *size = sizeof(vki_prcred_t) + (ngroups - 1) * sizeof(gid_t);
+   vki_prcred_t *prcred = VG_(malloc)("coredump-elf.cp.1", *size);
+   VG_(memset)(prcred, 0, *size);
+
+   prcred->pr_euid = VG_(geteuid)();
+   prcred->pr_ruid = get_uid();
+   prcred->pr_suid = prcred->pr_euid;
+   prcred->pr_egid = VG_(getegid)();
+   prcred->pr_rgid = get_gid();
+   prcred->pr_sgid = prcred->pr_egid;
+   prcred->pr_ngroups = ngroups;
+
+   UInt i;
+   for (i = 0; i < ngroups; i++)
+      prcred->pr_groups[i] = group_list[i];
+
+   return prcred;
+}
+
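+/* Record which mapping types are included in this core dump. */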
+static void fill_core_content(vki_core_content_t *content)
+{
+   *content = VKI_CC_CONTENT_STACK | VKI_CC_CONTENT_HEAP
+              | VKI_CC_CONTENT_SHANON | VKI_CC_CONTENT_TEXT
+              | VKI_CC_CONTENT_DATA | VKI_CC_CONTENT_RODATA
+              | VKI_CC_CONTENT_ANON | VKI_CC_CONTENT_SHM
+              | VKI_CC_CONTENT_ISM | VKI_CC_CONTENT_DISM;
+}
+
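+/* Read the process privilege sets from /proc/self/priv.  Returns a malloc'd
+   buffer and its size, or NULL on failure. */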
+static vki_prpriv_t *create_prpriv(SizeT *size)
+{
+   Int fd = VG_(fd_open)("/proc/self/priv", O_RDONLY, 0);
+   if (fd < 0)
+      return NULL;
+
+   struct vg_stat stats;
+   if (VG_(fstat)(fd, &stats) != 0) {
+      VG_(close)(fd);
+      return NULL;
+   }
+
+   vki_prpriv_t *prpriv = VG_(malloc)("coredump-elf.cp.1", stats.size);
+
+   if (VG_(read)(fd, prpriv, stats.size) != stats.size) {
+      VG_(free)(prpriv);
+      VG_(close)(fd);
+      return NULL;
+   }
+
+   VG_(close)(fd);
+   *size = stats.size;
+   return prpriv;
+}
+
+static vki_priv_impl_info_t *create_priv_info(SizeT *size)
+{
+   /* The size of the returned priv_impl_info_t is not known a priori. */
+   vki_priv_impl_info_t first_cut[100];
+   SysRes sres = VG_(do_syscall5)(SYS_privsys, VKI_PRIVSYS_GETIMPLINFO,
+                                  0, 0, (UWord) first_cut,
+                                  sizeof(first_cut));
+   if (sr_isError(sres))
+       return NULL;
+
+   SizeT real_size = first_cut[0].priv_headersize
+                     + first_cut[0].priv_globalinfosize;
+   vki_priv_impl_info_t *priv_info = VG_(malloc)("coredump-elf.cpi.1",
+                                                 real_size);
+
+   if (real_size <= sizeof(first_cut)) {
+      /* if the first_cut was large enough */
+      VG_(memcpy)(priv_info, first_cut, real_size);
+   } else {
+      /* otherwise repeat the syscall with a large enough buffer */
+      sres = VG_(do_syscall5)(SYS_privsys, VKI_PRIVSYS_GETIMPLINFO,
+                              0, 0, (UWord) priv_info, real_size);
+      if (sr_isError(sres)) {
+          VG_(free)(priv_info);
+          return NULL;
+      }
+   }
+
+   *size = real_size;
+   return priv_info;
+}
+
+static void fill_lwpsinfo(vki_lwpsinfo_t *lwp,
+                          const ThreadState *tst)
+{
+   VG_(memset)(lwp, 0, sizeof(*lwp));
+
+   lwp->pr_lwpid = tst->os_state.lwpid;
+   fill_thread_state(tst, &lwp->pr_state, &lwp->pr_sname);
+   fill_scheduling_class(lwp->pr_clname, sizeof(lwp->pr_clname));
+}
+
+static void fill_lwpstatus(vki_lwpstatus_t *lwp,
+                           const ThreadState *tst,
+			   const vki_siginfo_t *si)
+{
+   VG_(memset)(lwp, 0, sizeof(*lwp));
+
+   lwp->pr_flags = VKI_PR_PCINVAL;
+   lwp->pr_lwpid = tst->os_state.lwpid;
+   fill_siginfo(si, &lwp->pr_info, &lwp->pr_cursig);
+   fill_scheduling_class(lwp->pr_clname, sizeof(lwp->pr_clname));
+   fill_regset(&lwp->pr_reg, tst);
+   fill_fpregset(&lwp->pr_fpreg, tst);
+}
+
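+/* Emit the old-style per-thread notes (NT_PRSTATUS, NT_PRFPREG and, where
+   available, NT_PRXREG). */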
+static void fill_old_note_for_thread(note_t **notes,
+                                     const ThreadState *tst,
+                                     const vki_siginfo_t *si)
+{
+   vki_elf_prstatus_t prstatus;
+   fill_prstatus(&prstatus, tst, si);
+   add_note(notes, VKI_NT_PRSTATUS, &prstatus, sizeof(vki_elf_prstatus_t));
+
+   vki_fpregset_t fpu;
+   fill_fpregset(&fpu, tst);
+   add_note(notes, VKI_NT_PRFPREG, &fpu, sizeof(vki_fpregset_t));
+
+#if defined(SOLARIS_PRXREGSET_T)
+   if (should_dump_xregs(tst)) {
+      vki_prxregset_t xregs;
+      fill_xregs(&xregs, tst);
+      add_note(notes, VKI_NT_PRXREG, &xregs, sizeof(vki_prxregset_t));
+   }
+#endif /* SOLARIS_PRXREGSET_T */
+}
+
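+/* Emit the new-style per-thread notes (NT_LWPSINFO, NT_LWPSTATUS and, where
+   available, NT_PRXREG). */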
+static void fill_new_note_for_thread(note_t **notes,
+                                     const ThreadState *tst,
+                                     const vki_siginfo_t *si)
+{
+   vki_lwpsinfo_t lwpsinfo;
+   fill_lwpsinfo(&lwpsinfo, tst);
+   add_note(notes, VKI_NT_LWPSINFO, &lwpsinfo, sizeof(vki_lwpsinfo_t));
+
+   vki_lwpstatus_t lwpstatus;
+   fill_lwpstatus(&lwpstatus, tst, si);
+   add_note(notes, VKI_NT_LWPSTATUS, &lwpstatus, sizeof(vki_lwpstatus_t));
+
+#if defined(SOLARIS_PRXREGSET_T)
+   if (should_dump_xregs(tst)) {
+      vki_prxregset_t xregs;
+      fill_xregs(&xregs, tst);
+      add_note(notes, VKI_NT_PRXREG, &xregs, sizeof(vki_prxregset_t));
+   }
+#endif /* SOLARIS_PRXREGSET_T */
+}
+
+/*====================================================================*/
+/*=== Note utility functions                                       ===*/
+/*====================================================================*/
+
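+/* Append a note of the given type to the end of the list.  The payload is
+   copied and its size rounded up to a 4-byte boundary, as required by the
+   ELF note format.  All notes carry the name "CORE". */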
+static void add_note(note_t **list, UInt type, const void *data,
+                     UInt datasz)
+{
+   UInt note_size = sizeof(note_t) + VG_ROUNDUP(datasz, 4);
+
+   note_t *n = VG_(malloc)("coredump-elf.an.1", note_size);
+
+   VG_(memset)(n, 0, note_size);
+   n->nhdr.n_type = type;
+   n->nhdr.n_namesz = 5;
+   n->nhdr.n_descsz = VG_ROUNDUP(datasz, 4);
+   VG_(memcpy)(n->name, "CORE", 4);
+   VG_(memcpy)(n->data, data, datasz);
+
+   if (*list == NULL) {
+      *list = n;
+      return;
+   }
+
+   note_t *tail = *list;
+   while (tail->next != NULL)
+      tail = tail->next;
+   tail->next = n;
+}
+
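+/* On-disk size of a single note, excluding the in-memory list pointer. */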
+static UInt note_size(const note_t *note)
+{
+   return sizeof(note_t) - sizeof(note_t *) + note->nhdr.n_descsz;
+}
+
+static UInt notes_size(const note_t *list)
+{
+   UInt size = 0;
+   const note_t *note;
+
+   for (note = list; note != NULL; note = note->next)
+      size += note_size(note);
+
+   return size;
+}
+
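+/* Describe a block of notes with a PT_NOTE program header.  Notes are not
+   mapped into memory, so p_vaddr and p_memsz are left zero. */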
+static void fill_notes_phdr(VKI_ESZ(Phdr) *phdr, UInt offset,
+                            UInt size_of_notes)
+{
+   phdr->p_type = PT_NOTE;
+   phdr->p_offset = offset;
+   phdr->p_vaddr = 0;
+   phdr->p_paddr = 0;
+   phdr->p_filesz = size_of_notes;
+   phdr->p_memsz = 0;
+   phdr->p_flags = PF_R;
+   phdr->p_align = 0;
+}
+
+static void write_notes(Int fd, const HChar *filename,
+                        const note_t *list)
+{
+   const note_t *note;
+
+   for (note = list; note != NULL; note = note->next)
+      write_part(fd, filename, CONST_CAST(void *, &note->nhdr),
+                 note_size(note), "notes");
+}
+
+static void free_notes(note_t *list)
+{
+   while (list != NULL) {
+      note_t *next = list->next;
+      VG_(free)(list);
+      list = next;
+   }
+}
+
+/*====================================================================*/
+/*=== Main coredump function                                       ===*/
+/*====================================================================*/
+
+void VG_(make_coredump)(ThreadId tid, const vki_siginfo_t *si,
+                        ULong max_size)
+{
+   const HChar *basename = "vgcore";
+   const HChar *coreext = "";
+   Int core_fd;
+
+   if (VG_(clo_log_fname_expanded) != NULL) {
+      coreext = ".core";
+      basename = VG_(expand_file_name)("--log-file",
+                                       VG_(clo_log_fname_expanded));
+   }
+
+   vg_assert(coreext != NULL);
+   vg_assert(basename != NULL);
+
+   UInt filename_size = VG_(strlen)(coreext) + VG_(strlen)(basename)
+                        + 100; /* for the two %d's */
+   HChar *filename = VG_(malloc)("coredump-elf.mc.1", filename_size);
+
+   /* Try to come up with a non-existent coredump filename. */
+   UInt seq = 0;
+   for (;;) {
+      Int oflags = VKI_O_CREAT|VKI_O_WRONLY|VKI_O_EXCL|VKI_O_TRUNC;
+
+      if (seq == 0)
+	 VG_(snprintf)(filename, filename_size, "%s%s.%d",
+		      basename, coreext, VG_(getpid)());
+      else
+	 VG_(snprintf)(filename, filename_size, "%s%s.%d.%d",
+		      basename, coreext, VG_(getpid)(), seq);
+      seq++;
+
+#ifdef VKI_O_LARGEFILE
+      oflags |= VKI_O_LARGEFILE;
+#endif
+
+      SysRes sres = VG_(open)(filename, oflags,
+                              VKI_S_IRUSR|VKI_S_IWUSR);
+      if (!sr_isError(sres)) {
+         core_fd = sr_Res(sres);
+	 break;
+      }
+
+      if (sr_isError(sres) && sr_Err(sres) != VKI_EEXIST) {
+         VG_(umsg)("Cannot create coredump file %s (%lu)\n",
+                   filename, sr_Err(sres));
+         VG_(free)(filename);
+	 return;
+      }
+   }
+
+   /* Get the client segments. Free seg_starts after use. */
+   Int n_seg_starts;
+   Addr *seg_starts = VG_(get_segment_starts)(SkFileC | SkAnonC | SkShmC,
+                                              &n_seg_starts);
+
+   /* Count how many memory segments to dump. */
+   Int i;
+   UInt num_phdrs = 2;		/* two CORE note sections */
+   for (i = 0; i < n_seg_starts; i++) {
+      if (!may_dump(VG_(am_find_nsegment(seg_starts[i]))))
+	 continue;
+
+      num_phdrs++;
+   }
+
+   VKI_ESZ(Ehdr) ehdr;
+   fill_ehdr(&ehdr, num_phdrs);
+
+   VKI_ESZ(Shdr) shdr;
+   if (ehdr.e_shnum > 0)
+      fill_zero_shdr(&shdr, num_phdrs);
+   UInt phdrs_size = num_phdrs * ehdr.e_phentsize;
+
+   /* Construct the old-style notes. */
+   note_t *old_notes = NULL;
+
+   vki_elf_prpsinfo_t prpsinfo;
+   fill_prpsinfo(&prpsinfo, &VG_(threads)[tid], si);
+   add_note(&old_notes, VKI_NT_PRPSINFO, &prpsinfo,
+            sizeof(vki_elf_prpsinfo_t));
+
+   HChar platform[256 + 1];
+   fill_platform(platform, sizeof(platform));
+   add_note(&old_notes, VKI_NT_PLATFORM, platform,
+            VG_(strlen)(platform) + 1);
+
+   add_note(&old_notes, VKI_NT_AUXV, VG_(client_auxv),
+            count_auxv() * sizeof(auxv_t));
+
+   /* Add detail about the faulting thread as the first note.
+      This is how gdb determines which thread faulted. Note that
+      mdb does not need such aid. */
+   fill_old_note_for_thread(&old_notes, &VG_(threads)[tid], si);
+
+   /* Now add details for all threads except the one that faulted. */
+   ThreadId t_idx;
+   for (t_idx = 1; t_idx < VG_N_THREADS; t_idx++) {
+      if ((VG_(threads)[t_idx].status != VgTs_Empty) &&
+            (VG_(threads)[t_idx].status != VgTs_Zombie)) {
+         if (t_idx == tid)
+            continue;
+
+         fill_old_note_for_thread(&old_notes, &VG_(threads)[t_idx], si);
+      }
+   }
+
+   /* Construct the new-style notes. */
+   note_t *new_notes = NULL;
+   vki_psinfo_t psinfo;
+   fill_psinfo(&psinfo, &VG_(threads)[tid], si);
+   add_note(&new_notes, VKI_NT_PSINFO, &psinfo, sizeof(vki_psinfo_t));
+
+   vki_pstatus_t pstatus;
+   fill_pstatus(&pstatus, &VG_(threads)[tid], si);
+   add_note(&new_notes, VKI_NT_PSTATUS, &pstatus, sizeof(vki_pstatus_t));
+
+   add_note(&new_notes, VKI_NT_PLATFORM, platform,
+            VG_(strlen)(platform) + 1);
+
+   add_note(&new_notes, VKI_NT_AUXV, VG_(client_auxv),
+            count_auxv() * sizeof(auxv_t));
+
+   struct vki_utsname uts;
+   fill_utsname(&uts);
+   add_note(&new_notes, VKI_NT_UTSNAME, &uts,
+            sizeof(struct vki_utsname));
+
+   SizeT prcred_size;
+   vki_prcred_t *prcred = create_prcred(&prcred_size);
+   if (prcred != NULL) {
+      add_note(&new_notes, VKI_NT_PRCRED, prcred, prcred_size);
+      VG_(free)(prcred);
+   }
+
+   vki_core_content_t core_content;
+   fill_core_content(&core_content);
+   add_note(&new_notes, VKI_NT_CONTENT, &core_content,
+            sizeof(vki_core_content_t));
+
+   SizeT priv_size;
+   vki_prpriv_t *prpriv = create_prpriv(&priv_size);
+   if (prpriv != NULL) {
+      add_note(&new_notes, VKI_NT_PRPRIV, prpriv, priv_size);
+      VG_(free)(prpriv);
+   }
+
+   vki_priv_impl_info_t *priv_info = create_priv_info(&priv_size);
+   if (priv_info != NULL) {
+      add_note(&new_notes, VKI_NT_PRPRIVINFO, priv_info, priv_size);
+      VG_(free)(priv_info);
+   }
+
+   HChar zonename[VKI_ZONENAME_MAX + 1];
+   fill_zonename(zonename, sizeof(zonename));
+   add_note(&new_notes, VKI_NT_ZONENAME, zonename,
+            VG_(strlen)(zonename) + 1);
+
+   /* Add detail about the faulting thread as the first note.
+      This is how gdb determines which thread faulted. Note that
+      mdb does not need such aid. */
+   fill_new_note_for_thread(&new_notes, &VG_(threads)[tid], si);
+
+   /* Now add details for all threads except the one that faulted. */
+   for (t_idx = 1; t_idx < VG_N_THREADS; t_idx++) {
+      if ((VG_(threads)[t_idx].status != VgTs_Empty) &&
+            (VG_(threads)[t_idx].status != VgTs_Zombie)) {
+         if (t_idx == tid)
+            continue;
+
+         fill_new_note_for_thread(&new_notes, &VG_(threads)[t_idx], si);
+      }
+   }
+
+   VKI_ESZ(Phdr) *phdrs = VG_(malloc)("coredump-elf.mc.2", phdrs_size);
+
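+   /* Notes are placed right after the ELF header, the program headers and
+      the optional zero section header; segment data follows the notes. */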
+   UInt size_of_notes = notes_size(old_notes);
+   UInt offset = ehdr.e_ehsize + phdrs_size +
+                 (ehdr.e_shnum * ehdr.e_shentsize);
+
+   /* fill program header for old notes */
+   fill_notes_phdr(&phdrs[0], offset, size_of_notes);
+   offset += size_of_notes;
+
+   size_of_notes = notes_size(new_notes);
+   /* fill program header for new notes */
+   fill_notes_phdr(&phdrs[1], offset, size_of_notes);
+   offset += size_of_notes;
+
+   /* fill program headers for segments */
+   UInt idx;
+   for (i = 0, idx = 2; i < n_seg_starts; i++) {
+      NSegment const *seg = VG_(am_find_nsegment(seg_starts[i]));
+
+      if (!may_dump(seg))
+	 continue;
+
+      fill_phdr(&phdrs[idx], seg, offset,
+                (seg->end - seg->start + 1 + offset) < max_size);
+
+      offset += phdrs[idx].p_filesz;
+
+      idx++;
+   }
+
+   /* write everything out */
+   write_part(core_fd, filename, &ehdr, sizeof(ehdr),
+             "elf headers");
+   write_part(core_fd, filename, phdrs, phdrs_size,
+              "program headers");
+   if (ehdr.e_shnum > 0)
+      write_part(core_fd, filename, &shdr, sizeof(shdr),
+                 "section headers");
+   write_notes(core_fd, filename, old_notes);
+   write_notes(core_fd, filename, new_notes);
+
+   VG_(lseek)(core_fd, phdrs[2].p_offset, VKI_SEEK_SET);
+
+   for (i = 0, idx = 2; i < n_seg_starts; i++) {
+      NSegment const *seg = VG_(am_find_nsegment(seg_starts[i]));
+
+      if (!should_dump(seg))
+	 continue;
+
+      if (phdrs[idx].p_filesz > 0) {
+         Off64T off = VG_(lseek)(core_fd, phdrs[idx].p_offset,
+                                 VKI_SEEK_SET);
+         vg_assert(off == phdrs[idx].p_offset);
+         vg_assert(seg->end - seg->start + 1 >= phdrs[idx].p_filesz);
+
+         write_part(core_fd, filename, (void *) seg->start,
+                    phdrs[idx].p_filesz, "program segment");
+      }
+      idx++;
+   }
+
+   VG_(close)(core_fd);
+   VG_(free)(filename);
+   VG_(free)(phdrs);
+   free_notes(old_notes);
+   free_notes(new_notes);
+   VG_(free)(seg_starts);
+}
+
+#endif
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_debugger.c b/coregrind/m_debugger.c
index 4582db0..7bc809a 100644
--- a/coregrind/m_debugger.c
+++ b/coregrind/m_debugger.c
@@ -448,6 +448,12 @@
 
    return VG_(ptrace)(VKI_PTRACE_SETREGS, pid, NULL, &regs);
 
+#elif defined(VGP_x86_solaris)
+   I_die_here;
+
+#elif defined(VGP_amd64_solaris)
+   I_die_here;
+
 #else
 #  error Unknown arch
 #endif
diff --git a/coregrind/m_debuginfo/d3basics.c b/coregrind/m_debuginfo/d3basics.c
index 09d1b12..9a6051a 100644
--- a/coregrind/m_debuginfo/d3basics.c
+++ b/coregrind/m_debuginfo/d3basics.c
@@ -400,10 +400,12 @@
 static Bool get_Dwarf_Reg( /*OUT*/Addr* a, Word regno, const RegSummary* regs )
 {
    vg_assert(regs);
-#  if defined(VGP_x86_linux) || defined(VGP_x86_darwin)
+#  if defined(VGP_x86_linux) || defined(VGP_x86_darwin) \
+      || defined(VGP_x86_solaris)
    if (regno == 5/*EBP*/) { *a = regs->fp; return True; }
    if (regno == 4/*ESP*/) { *a = regs->sp; return True; }
-#  elif defined(VGP_amd64_linux) || defined(VGP_amd64_darwin)
+#  elif defined(VGP_amd64_linux) || defined(VGP_amd64_darwin) \
+        || defined(VGP_amd64_solaris)
    if (regno == 6/*RBP*/) { *a = regs->fp; return True; }
    if (regno == 7/*RSP*/) { *a = regs->sp; return True; }
 #  elif defined(VGP_ppc32_linux)
diff --git a/coregrind/m_debuginfo/debuginfo.c b/coregrind/m_debuginfo/debuginfo.c
index b69c1c8..21a2bb7 100644
--- a/coregrind/m_debuginfo/debuginfo.c
+++ b/coregrind/m_debuginfo/debuginfo.c
@@ -56,7 +56,7 @@
 #include "priv_tytypes.h"
 #include "priv_storage.h"
 #include "priv_readdwarf.h"
-#if defined(VGO_linux)
+#if defined(VGO_linux) || defined(VGO_solaris)
 # include "priv_readelf.h"
 # include "priv_readdwarf3.h"
 # include "priv_readpdb.h"
@@ -597,7 +597,7 @@
 /*---                                                        ---*/
 /*--------------------------------------------------------------*/
 
-#if defined(VGO_linux)  ||  defined(VGO_darwin)
+#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 /* Helper (indirect) for di_notify_ACHIEVE_ACCEPT_STATE */
 static Bool overlaps_DebugInfoMappings ( const DebugInfoMapping* map1,
@@ -745,7 +745,7 @@
    truncate_DebugInfoMapping_overlaps( di, di->fsm.maps );
 
    /* And acquire new info. */
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    ok = ML_(read_elf_debug_info)( di );
 #  elif defined(VGO_darwin)
    ok = ML_(read_macho_debug_info)( di );
@@ -965,6 +965,11 @@
    is_ro_map = seg->hasR && !seg->hasW && !seg->hasX;
 #  endif
 
+#  if defined(VGO_solaris)
+   is_rx_map = seg->hasR && seg->hasX && !seg->hasW;
+   is_rw_map = seg->hasR && seg->hasW;
+#  endif
+
    if (debug)
       VG_(printf)("di_notify_mmap-3: "
                   "is_rx_map %d, is_rw_map %d, is_ro_map %d\n",
@@ -1017,7 +1022,7 @@
    vg_assert(sr_Res(preadres) > 0 && sr_Res(preadres) <= sizeof(buf1k) );
 
    /* We're only interested in mappings of object files. */
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    if (!ML_(is_elf_object_file)( buf1k, (SizeT)sr_Res(preadres), False ))
       return 0;
 #  elif defined(VGO_darwin)
@@ -1422,7 +1427,7 @@
    if (pdbname) ML_(dinfo_free)(pdbname);
 }
 
-#endif /* defined(VGO_linux) || defined(VGO_darwin) */
+#endif /* defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris) */
 
 
 /*------------------------------------------------------------*/
@@ -1939,6 +1944,8 @@
 #      elif defined(VGO_darwin)
        // See readmacho.c for an explanation of this.
        VG_STREQ("start_according_to_valgrind", name) ||  // Darwin, darling
+#      elif defined(VGO_solaris)
+       VG_STREQ("_start", name) || // main() is called directly from _start
 #      else
 #        error "Unknown OS"
 #      endif
diff --git a/coregrind/m_debuginfo/priv_readpdb.h b/coregrind/m_debuginfo/priv_readpdb.h
index 117a8c5..50da29b 100644
--- a/coregrind/m_debuginfo/priv_readpdb.h
+++ b/coregrind/m_debuginfo/priv_readpdb.h
@@ -32,7 +32,7 @@
    The GNU General Public License is contained in the file COPYING.
 */
 
-#if defined(VGO_linux) || defined(VGO_darwin)
+#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 #ifndef __PRIV_READPDB_H
 #define __PRIV_READPDB_H
@@ -59,7 +59,7 @@
 
 #endif /* ndef __PRIV_READPDB_H */
 
-#endif // defined(VGO_linux) || defined(VGO_darwin)
+#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 /*--------------------------------------------------------------------*/
 /*--- end                                                          ---*/
diff --git a/coregrind/m_debuginfo/readdwarf.c b/coregrind/m_debuginfo/readdwarf.c
index 3909327..0111d6a 100644
--- a/coregrind/m_debuginfo/readdwarf.c
+++ b/coregrind/m_debuginfo/readdwarf.c
@@ -29,7 +29,7 @@
    The GNU General Public License is contained in the file COPYING.
 */
 
-#if defined(VGO_linux) || defined(VGO_darwin)
+#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 #include "pub_core_basics.h"
 #include "pub_core_debuginfo.h"
@@ -1719,11 +1719,11 @@
 
 /* --------------- Decls --------------- */
 
-#if defined(VGP_x86_linux)
+#if defined(VGP_x86_linux) || defined(VGP_x86_solaris)
 #  define FP_REG         5
 #  define SP_REG         4
 #  define RA_REG_DEFAULT 8
-#elif defined(VGP_amd64_linux)
+#elif defined(VGP_amd64_linux) || defined(VGP_amd64_solaris)
 #  define FP_REG         6
 #  define SP_REG         7
 #  define RA_REG_DEFAULT 16
@@ -1825,6 +1825,7 @@
     DW_CFA_GNU_window_save    = 0x2d, /* GNU extension */
     DW_CFA_GNU_args_size      = 0x2e, /* GNU extension */
     DW_CFA_GNU_negative_offset_extended = 0x2f, /* GNU extension */
+    DW_CFA_ORCL_arg_loc       = 0x30, /* Oracle extension */
     DW_CFA_hi_user            = 0x3f
   };
 
@@ -2023,6 +2024,7 @@
       DiCursor ehframe_image;
       Addr     ehframe_avma;
       Addr     text_bias;
+      Addr     got_avma;
    }
    AddressDecodingInfo;
 
@@ -2650,6 +2652,7 @@
    UChar    encoding      = adi->encoding;
    DiCursor ehframe_image = adi->ehframe_image;
    Addr     ehframe_avma  = adi->ehframe_avma;
+   Addr     got_avma      = adi->got_avma;
 
    vg_assert((encoding & DW_EH_PE_indirect) == 0);
 
@@ -2661,8 +2664,7 @@
          base = ehframe_avma + ML_(cur_minus)(*data, ehframe_image);
          break;
       case DW_EH_PE_datarel:
-         vg_assert(0);
-         base = /* data base address */ 0;
+         base = got_avma;
          break;
       case DW_EH_PE_textrel:
          vg_assert(0);
@@ -3361,6 +3363,11 @@
          }
          break;
 
+      case DW_CFA_ORCL_arg_loc:
+         if (di->ddump_frames)
+            VG_(printf)("  DW_CFA_ORCL_arg_loc\n");
+         break;
+
       default: 
          VG_(message)(Vg_DebugMsg, "DWARF2 CFI reader: unhandled CFI "
                                    "instruction 0:%d\n", (Int)lo6); 
@@ -3574,6 +3581,11 @@
          VG_(printf)("  sci:DW_CFA_GNU_window_save\n");
          break;
 
+      case DW_CFA_ORCL_arg_loc:
+         /* :TODO: Print all arguments when implemented in libdwarf. */
+         VG_(printf)("  sci:DW_CFA_ORCL_arg_loc\n");
+         break;
+
       default: 
          VG_(printf)("  sci:0:%d\n", (Int)lo6); 
          break;
@@ -4008,6 +4020,7 @@
             adi.ehframe_image = frame_image;
             adi.ehframe_avma  = frame_avma;
             adi.text_bias     = di->text_debug_bias;
+            adi.got_avma      = di->got_avma;
             show_CF_instructions( the_CIEs[this_CIE].instrs, 
                                   the_CIEs[this_CIE].ilen, &adi,
                                   the_CIEs[this_CIE].code_a_f,
@@ -4058,6 +4071,7 @@
          adi.ehframe_image = frame_image;
          adi.ehframe_avma  = frame_avma;
          adi.text_bias     = di->text_debug_bias;
+         adi.got_avma      = di->got_avma;
          fde_initloc = step_encoded_Addr(&adi, &data);
          if (di->trace_cfi) 
             VG_(printf)("fde.initloc     = %#lx\n", fde_initloc);
@@ -4066,6 +4080,7 @@
          adi.ehframe_image = frame_image;
          adi.ehframe_avma  = frame_avma;
          adi.text_bias     = di->text_debug_bias;
+         adi.got_avma      = di->got_avma;
 
          /* WAS (incorrectly):
             fde_arange = read_encoded_Addr(&nbytes, &adi, data);
@@ -4158,6 +4173,7 @@
          adi.ehframe_image = frame_image;
          adi.ehframe_avma  = frame_avma;
          adi.text_bias     = di->text_debug_bias;
+         adi.got_avma      = di->got_avma;
 
          if (di->trace_cfi)
             show_CF_instructions( fde_instrs, fde_ilen, &adi,
@@ -4214,7 +4230,7 @@
     return;
 }
 
-#endif // defined(VGO_linux) || defined(VGO_darwin)
+#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 /*--------------------------------------------------------------------*/
 /*--- end                                                          ---*/
diff --git a/coregrind/m_debuginfo/readdwarf3.c b/coregrind/m_debuginfo/readdwarf3.c
index 85070e4..553ef7c 100644
--- a/coregrind/m_debuginfo/readdwarf3.c
+++ b/coregrind/m_debuginfo/readdwarf3.c
@@ -35,7 +35,7 @@
    without prior written permission.
 */
 
-#if defined(VGO_linux) || defined(VGO_darwin)
+#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 /* REFERENCE (without which this code will not make much sense):
 
@@ -5268,7 +5268,7 @@
    TRACE_SYMTAB("\n");
 #endif
 
-#endif // defined(VGO_linux) || defined(VGO_darwin)
+#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 /*--------------------------------------------------------------------*/
 /*--- end                                                          ---*/
diff --git a/coregrind/m_debuginfo/readelf.c b/coregrind/m_debuginfo/readelf.c
index fda4161..c9498e8 100644
--- a/coregrind/m_debuginfo/readelf.c
+++ b/coregrind/m_debuginfo/readelf.c
@@ -29,7 +29,7 @@
    The GNU General Public License is contained in the file COPYING.
 */
 
-#if defined(VGO_linux)
+#if defined(VGO_linux) || defined(VGO_solaris)
 
 #include "pub_core_basics.h"
 #include "pub_core_vki.h"
@@ -54,6 +54,9 @@
 
 /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
 #include <elf.h>
+#if defined(VGO_solaris)
+#include <sys/link.h>              /* ElfXX_Dyn, DT_* */
+#endif
 /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
 
 /*------------------------------------------------------------*/
@@ -1844,7 +1847,7 @@
       OffT   foff = a_shdr.sh_offset;
       UWord  size = a_shdr.sh_size; /* Do not change this to be signed. */
       UInt   alyn = a_shdr.sh_addralign;
-      Bool   bits = !(a_shdr.sh_type == SHT_NOBITS);
+      Bool   nobits = a_shdr.sh_type == SHT_NOBITS;
       /* Look through our collection of info obtained from the PT_LOAD
          headers, and make 'inrx' and 'inrw' point to the first entry
          in each that intersects 'avma'.  If in each case none is found,
@@ -1870,9 +1873,9 @@
                    foff, foff+size-1, (void*)svma, name);
 
       /* Check for sane-sized segments.  SHT_NOBITS sections have zero
-         size in the file. */
-      if ((foff >= ML_(img_size)(mimg)) 
-          || (foff + (bits ? size : 0) > ML_(img_size)(mimg))) {
+         size in the file and their offsets are just conceptual. */
+      if (!nobits &&
+          (foff >= ML_(img_size)(mimg) || foff + size > ML_(img_size)(mimg))) {
          ML_(symerr)(di, True, "ELF Section extends beyond image end");
          goto out;
       }
@@ -2161,7 +2164,8 @@
 #     if defined(VGP_x86_linux) || defined(VGP_amd64_linux) \
          || defined(VGP_arm_linux) || defined (VGP_s390x_linux) \
          || defined(VGP_mips32_linux) || defined(VGP_mips64_linux) \
-         || defined(VGP_arm64_linux) || defined(VGP_tilegx_linux)
+         || defined(VGP_arm64_linux) || defined(VGP_tilegx_linux) \
+         || defined(VGP_x86_solaris) || defined(VGP_amd64_solaris)
       /* Accept .plt where mapped as rx (code) */
       if (0 == VG_(strcmp)(name, ".plt")) {
          if (inrx && !di->plt_present) {
@@ -2323,6 +2327,9 @@
       DiSlice symtab_escn         = DiSlice_INVALID; // .symtab
       DiSlice dynstr_escn         = DiSlice_INVALID; // .dynstr
       DiSlice dynsym_escn         = DiSlice_INVALID; // .dynsym
+#     if defined(VGO_solaris)
+      DiSlice ldynsym_escn        = DiSlice_INVALID; // .SUNW_ldynsym
+#     endif
       DiSlice debuglink_escn      = DiSlice_INVALID; // .gnu_debuglink
       DiSlice debugaltlink_escn   = DiSlice_INVALID; // .gnu_debugaltlink
       DiSlice debug_line_escn     = DiSlice_INVALID; // .debug_line   (dwarf2)
@@ -2385,8 +2392,8 @@
                              _sec_name, (ULong)_sec_escn.ioff, \
                              ((ULong)_sec_escn.ioff) + _sec_escn.szB - 1); \
                /* SHT_NOBITS sections have zero size in the file. */ \
-               if ( a_shdr.sh_offset \
-                    + (nobits ? 0 : _sec_escn.szB) > ML_(img_size)(mimg) ) { \
+               if (!nobits && \
+                   a_shdr.sh_offset + _sec_escn.szB > ML_(img_size)(mimg) ) { \
                   ML_(symerr)(di, True, \
                               "   section beyond image end?!"); \
                   goto out; \
@@ -2404,6 +2411,9 @@
          FIND(".dynstr",            dynstr_escn)
          FIND(".symtab",            symtab_escn)
          FIND(".strtab",            strtab_escn)
+#        if defined(VGO_solaris)
+         FIND(".SUNW_ldynsym",      ldynsym_escn)
+#        endif
 
          FIND(".gnu_debuglink",     debuglink_escn)
          FIND(".gnu_debugaltlink",  debugaltlink_escn)
@@ -2680,8 +2690,8 @@
                                 (ULong)_sec_escn.ioff, \
                                 ((ULong)_sec_escn.ioff) + _sec_escn.szB - 1); \
                   /* SHT_NOBITS sections have zero size in the file. */ \
-                  if (a_shdr.sh_offset \
-                      + (nobits ? 0 : _sec_escn.szB) > ML_(img_size)(dimg)) { \
+                  if (!nobits && a_shdr.sh_offset \
+                      + _sec_escn.szB > ML_(img_size)(dimg)) { \
                      ML_(symerr)(di, True, \
                                  "   section beyond image end?!"); \
                      goto out; \
@@ -2840,6 +2850,9 @@
       /* Check some sizes */
       vg_assert((dynsym_escn.szB % sizeof(ElfXX_Sym)) == 0);
       vg_assert((symtab_escn.szB % sizeof(ElfXX_Sym)) == 0);
+#     if defined(VGO_solaris)
+      vg_assert((ldynsym_escn.szB % sizeof(ElfXX_Sym)) == 0);
+#     endif
 
       /* TOPLEVEL */
       /* Read symbols */
@@ -2859,6 +2872,11 @@
          read_elf_symtab(di, "dynamic symbol table",
                          &dynsym_escn, &dynstr_escn, &opd_escn,
                          False);
+#        if defined(VGO_solaris)
+         read_elf_symtab(di, "local dynamic symbol table",
+                         &ldynsym_escn, &dynstr_escn, &opd_escn,
+                         False);
+#        endif
       }
 
       /* TOPLEVEL */
@@ -3010,7 +3028,7 @@
    /* NOTREACHED */
 }
 
-#endif // defined(VGO_linux)
+#endif // defined(VGO_linux) || defined(VGO_solaris)
 
 /*--------------------------------------------------------------------*/
 /*--- end                                                          ---*/
diff --git a/coregrind/m_debuginfo/readpdb.c b/coregrind/m_debuginfo/readpdb.c
index 3a0d422..cc81831 100644
--- a/coregrind/m_debuginfo/readpdb.c
+++ b/coregrind/m_debuginfo/readpdb.c
@@ -35,7 +35,7 @@
    The GNU General Public License is contained in the file COPYING.
 */
 
-#if defined(VGO_linux) || defined(VGO_darwin)
+#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 #include "pub_core_basics.h"
 #include "pub_core_debuginfo.h"
@@ -2584,7 +2584,7 @@
    return res;
 }
 
-#endif // defined(VGO_linux) || defined(VGO_darwin)
+#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 /*--------------------------------------------------------------------*/
 /*--- end                                                          ---*/
diff --git a/coregrind/m_debuginfo/storage.c b/coregrind/m_debuginfo/storage.c
index f4b1d2f..c455ca3 100644
--- a/coregrind/m_debuginfo/storage.c
+++ b/coregrind/m_debuginfo/storage.c
@@ -1457,7 +1457,7 @@
    vlena = VG_(strlen)(a_name);
    vlenb = VG_(strlen)(b_name);
 
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
 #    define VERSION_CHAR '@'
 #  elif defined(VGO_darwin)
 #    define VERSION_CHAR '$'
diff --git a/coregrind/m_debuglog.c b/coregrind/m_debuglog.c
index 469c694..66f567c 100644
--- a/coregrind/m_debuglog.c
+++ b/coregrind/m_debuglog.c
@@ -56,6 +56,9 @@
 #include "pub_core_vkiscnums.h"  /* for syscall numbers */
 #include "pub_core_debuglog.h"   /* our own iface */
 #include "pub_core_clreq.h"      /* for RUNNING_ON_VALGRIND */
+#if defined(VGO_solaris)
+#include "pub_core_vki.h"        /* for EINTR and ERESTART */
+#endif
 
 static Bool clo_xml;
 
@@ -556,6 +559,97 @@
   return __res;
 }
 
+#elif defined(VGP_x86_solaris)
+static UInt local_sys_write_stderr ( const HChar* buf, Int n )
+{
+   UInt res, err;
+   Bool restart;
+
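+   /* Solaris/x86 syscalls are entered via "int $0x91"; an error is indicated
+      by the carry flag, with the error number left in %eax. */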
+   do {
+      /* The Solaris kernel does not restart syscalls automatically so it is
+         done here. */
+      __asm__ __volatile__ (
+         "movl  %[n], %%eax\n"          /* push n */
+         "pushl %%eax\n"
+         "movl  %[buf], %%eax\n"        /* push buf */
+         "pushl %%eax\n"
+         "movl  $2, %%eax\n"            /* push stderr */
+         "pushl %%eax\n"
+         "movl  $"VG_STRINGIFY(__NR_write)", %%eax\n"
+         "pushl %%eax\n"                /* push fake return address */
+         "int   $0x91\n"                /* write(stderr, buf, n) */
+         "movl  $0, %%edx\n"            /* assume no error */
+         "jnc   1f\n"                   /* jump if no error */
+         "movl  $1, %%edx\n"            /* set error flag */
+         "1: "
+         "addl  $16, %%esp\n"           /* pop x4 */
+         : "=&a" (res), "=d" (err)
+         : [buf] "g" (buf), [n] "g" (n)
+         : "cc");
+      restart = err && (res == VKI_EINTR || res == VKI_ERESTART);
+   } while (restart);
+
+   return res;
+}
+
+static UInt local_sys_getpid ( void )
+{
+   UInt res;
+
+   /* The getpid() syscall never returns EINTR or ERESTART so there is no need
+      for restarting it. */
+   __asm__ __volatile__ (
+      "movl $"VG_STRINGIFY(__NR_getpid)", %%eax\n"
+      "int  $0x91\n"                    /* getpid() */
+      : "=a" (res)
+      :
+      : "edx", "cc");
+
+   return res;
+}
+
+#elif defined(VGP_amd64_solaris)
+static UInt local_sys_write_stderr ( const HChar* buf, Int n )
+{
+   ULong res, err;
+   Bool restart;
+
+   do {
+      /* The Solaris kernel does not restart syscalls automatically so it is
+         done here. */
+      __asm__ __volatile__ (
+         "movq  $2, %%rdi\n"            /* push stderr */
+         "movq  $"VG_STRINGIFY(__NR_write)", %%rax\n"
+         "syscall\n"                    /* write(stderr, buf, n) */
+         "movq  $0, %%rdx\n"            /* assume no error */
+         "jnc   1f\n"                   /* jump if no error */
+         "movq  $1, %%rdx\n"            /* set error flag */
+         "1: "
+         : "=a" (res), "=d" (err)
+         : "S" (buf), "d" (n)
+         : "cc");
+      restart = err && (res == VKI_EINTR || res == VKI_ERESTART);
+   } while (restart);
+
+   return res;
+}
+
+static UInt local_sys_getpid ( void )
+{
+   UInt res;
+
+   /* The getpid() syscall never returns EINTR or ERESTART so there is no need
+      for restarting it. */
+   __asm__ __volatile__ (
+      "movq $"VG_STRINGIFY(__NR_getpid)", %%rax\n"
+      "syscall\n"                       /* getpid() */
+      : "=a" (res)
+      :
+      : "edx", "cc");
+
+   return res;
+}
+
 #else
 # error Unknown platform
 #endif
diff --git a/coregrind/m_demangle/demangle.c b/coregrind/m_demangle/demangle.c
index 1054c14..883e164 100644
--- a/coregrind/m_demangle/demangle.c
+++ b/coregrind/m_demangle/demangle.c
@@ -282,6 +282,7 @@
          case 'D': EMITSO('$'); break;
          case 'L': EMITSO('('); break;
          case 'R': EMITSO(')'); break;
+         case 'S': EMITSO('/'); break;
          case 'Z': EMITSO('Z'); break;
          default: error = True; goto out;
       }
diff --git a/coregrind/m_dispatch/dispatch-amd64-solaris.S b/coregrind/m_dispatch/dispatch-amd64-solaris.S
new file mode 100644
index 0000000..740505d
--- /dev/null
+++ b/coregrind/m_dispatch/dispatch-amd64-solaris.S
@@ -0,0 +1,256 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The core dispatch loop, for jumping to a code address.       ---*/
+/*---                                     dispatch-amd64-solaris.S ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+  This file is part of Valgrind, a dynamic binary instrumentation
+  framework.
+
+  Copyright (C) 2000-2013 Julian Seward 
+     jseward@acm.org
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License as
+  published by the Free Software Foundation; either version 2 of the
+  License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+  02111-1307, USA.
+
+  The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_amd64_solaris)
+
+#include "pub_core_basics_asm.h"
+#include "pub_core_dispatch_asm.h"
+#include "pub_core_transtab_asm.h"
+#include "libvex_guest_offsets.h"	/* for OFFSET_amd64_RIP */
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- The dispatch loop.  VG_(disp_run_translations) is    ---*/
+/*--- used to run all translations,                        ---*/
+/*--- including no-redir ones.                             ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+/*----------------------------------------------------*/
+/*--- Entry and preamble (set everything up)       ---*/
+/*----------------------------------------------------*/
+
+/* signature:
+void VG_(disp_run_translations)( UWord* two_words,
+                                 void*  guest_state, 
+                                 Addr   host_addr );
+*/
+.text
+.globl VG_(disp_run_translations)
+.type  VG_(disp_run_translations), @function
+VG_(disp_run_translations):
+        /* %rdi holds two_words    */
+	/* %rsi holds guest_state  */
+	/* %rdx holds host_addr    */
+
+        /* The preamble */
+
+        /* Save integer registers, since this is a pseudo-function. */
+        pushq   %rax
+	pushq	%rbx
+	pushq	%rcx
+        pushq   %rdx
+	pushq	%rsi
+	pushq	%rbp
+	pushq	%r8
+	pushq	%r9
+	pushq	%r10
+	pushq	%r11
+	pushq	%r12
+	pushq	%r13
+	pushq	%r14
+	pushq	%r15
+        /* %rdi must be saved last */
+	pushq	%rdi
+
+        /* Get the host CPU in the state expected by generated code. */
+
+	/* set host FPU control word to the default mode expected 
+           by VEX-generated code.  See comments in libvex.h for
+           more info. */
+	finit
+	pushq	$0x027F
+	fldcw	(%rsp)
+	addq	$8, %rsp
+	
+	/* set host SSE control word to the default mode expected 
+	   by VEX-generated code. */
+	pushq	$0x1F80
+	ldmxcsr	(%rsp)
+	addq	$8, %rsp
+
+	/* set dir flag to known value */
+	cld
+
+	/* Set up the guest state pointer */
+	movq	%rsi, %rbp
+
+        /* and jump into the code cache.  Chained translations in
+           the code cache run, until for whatever reason, they can't
+           continue.  When that happens, the translation in question
+           will jump (or call) to one of the continuation points
+           VG_(cp_...) below. */
+        jmpq    *%rdx
+       	/*NOTREACHED*/	
+
+/*----------------------------------------------------*/
+/*--- Postamble and exit.                          ---*/
+/*----------------------------------------------------*/
+
+postamble:
+        /* At this point, %rax and %rdx contain two
+           words to be returned to the caller.  %rax
+           holds a TRC value, and %rdx optionally may
+           hold another word (for CHAIN_ME exits, the
+           address of the place to patch.) */
+        
+	/* We're leaving.  Check that nobody messed with %mxcsr
+           or %fpucw.  We can't mess with %rax or %rdx here as they
+           hold the tentative return values, but any others are OK. */
+#if !defined(ENABLE_INNER)
+        /* This check fails for self-hosting, so skip in that case */
+	pushq	$0
+	fstcw	(%rsp)
+	cmpl	$0x027F, (%rsp)
+	popq	%r15 /* get rid of the word without trashing %rflags */
+	jnz	invariant_violation
+#endif
+	pushq	$0
+	stmxcsr	(%rsp)
+	andl	$0xFFFFFFC0, (%rsp)  /* mask out status flags */
+	cmpl	$0x1F80, (%rsp)
+	popq	%r15
+	jnz	invariant_violation
+	/* otherwise we're OK */
+	jmp	remove_frame
+invariant_violation:
+	movq	$VG_TRC_INVARIANT_FAILED, %rax
+        movq    $0, %rdx
+
+remove_frame:
+        /* Pop %rdi, stash return values */
+	popq	%rdi
+        movq    %rax, 0(%rdi)
+        movq    %rdx, 8(%rdi)
+        /* Now pop everything else */
+	popq	%r15
+	popq	%r14
+	popq	%r13
+	popq	%r12
+	popq	%r11
+	popq	%r10
+	popq	%r9
+	popq	%r8
+	popq	%rbp
+	popq	%rsi
+	popq	%rdx
+	popq	%rcx
+	popq	%rbx
+	popq	%rax
+	ret	
+        
+/*----------------------------------------------------*/
+/*--- Continuation points                          ---*/
+/*----------------------------------------------------*/
+
+/* ------ Chain me to slow entry point ------ */
+.global VG_(disp_cp_chain_me_to_slowEP)
+VG_(disp_cp_chain_me_to_slowEP):
+        /* We got called.  The return address indicates
+           where the patching needs to happen.  Collect
+           the return address and, exit back to C land,
+           handing the caller the pair (Chain_me_S, RA) */
+        movq    $VG_TRC_CHAIN_ME_TO_SLOW_EP, %rax
+        popq    %rdx
+        /* 10 = movabsq $VG_(disp_chain_me_to_slowEP), %r11;
+           3  = call *%r11 */
+        subq    $10+3, %rdx
+        jmp     postamble
+
+/* ------ Chain me to fast entry point ------ */
+.global VG_(disp_cp_chain_me_to_fastEP)
+VG_(disp_cp_chain_me_to_fastEP):
+        /* We got called.  The return address indicates
+           where the patching needs to happen.  Collect
+           the return address and, exit back to C land,
+           handing the caller the pair (Chain_me_F, RA) */
+        movq    $VG_TRC_CHAIN_ME_TO_FAST_EP, %rax
+        popq    %rdx
+        /* 10 = movabsq $VG_(disp_chain_me_to_fastEP), %r11;
+           3  = call *%r11 */
+        subq    $10+3, %rdx
+        jmp     postamble
+
+/* ------ Indirect but boring jump ------ */
+.global VG_(disp_cp_xindir)
+VG_(disp_cp_xindir):
+	/* Where are we going? */
+	movq	OFFSET_amd64_RIP(%rbp), %rax
+
+        /* stats only */
+        addl    $1, VG_(stats__n_xindirs_32)
+        
+	/* try a fast lookup in the translation cache */
+	movabsq $VG_(tt_fast), %rcx
+	movq	%rax, %rbx		/* next guest addr */
+	andq	$VG_TT_FAST_MASK, %rbx	/* entry# */
+	shlq	$4, %rbx		/* entry# * sizeof(FastCacheEntry) */
+	movq	0(%rcx,%rbx,1), %r10	/* .guest */
+	movq	8(%rcx,%rbx,1), %r11	/* .host */
+	cmpq	%rax, %r10
+	jnz	fast_lookup_failed
+
+        /* Found a match.  Jump to .host. */
+	jmp 	*%r11
+	ud2	/* persuade insn decoders not to speculate past here */
+
+fast_lookup_failed:
+        /* stats only */
+        addl    $1, VG_(stats__n_xindir_misses_32)
+
+	movq	$VG_TRC_INNER_FASTMISS, %rax
+        movq    $0, %rdx
+	jmp	postamble
+
+/* ------ Assisted jump ------ */
+.global VG_(disp_cp_xassisted)
+VG_(disp_cp_xassisted):
+        /* %rbp contains the TRC */
+        movq    %rbp, %rax
+        movq    $0, %rdx
+        jmp     postamble
+
+/* ------ Event check failed ------ */
+.global VG_(disp_cp_evcheck_fail)
+VG_(disp_cp_evcheck_fail):
+       	movq	$VG_TRC_INNER_COUNTERZERO, %rax
+        movq    $0, %rdx
+	jmp	postamble
+
+
+.size VG_(disp_run_translations), .-VG_(disp_run_translations)
+
+#endif // defined(VGP_amd64_solaris)
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_dispatch/dispatch-x86-solaris.S b/coregrind/m_dispatch/dispatch-x86-solaris.S
new file mode 100644
index 0000000..3c7762b
--- /dev/null
+++ b/coregrind/m_dispatch/dispatch-x86-solaris.S
@@ -0,0 +1,247 @@
+
+/*--------------------------------------------------------------------*/
+/*--- The core dispatch loop, for jumping to a code address.       ---*/
+/*---                                       dispatch-x86-solaris.S ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+  This file is part of Valgrind, a dynamic binary instrumentation
+  framework.
+
+  Copyright (C) 2000-2012 Julian Seward 
+     jseward@acm.org
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License as
+  published by the Free Software Foundation; either version 2 of the
+  License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+  02111-1307, USA.
+
+  The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_x86_solaris)
+
+#include "pub_core_basics_asm.h"
+#include "pub_core_dispatch_asm.h"
+#include "pub_core_transtab_asm.h"
+#include "libvex_guest_offsets.h"	/* for OFFSET_x86_EIP */
+
+
+/*------------------------------------------------------------*/
+/*---                                                      ---*/
+/*--- The dispatch loop.  VG_(disp_run_translations) is    ---*/
+/*--- used to run all translations,                        ---*/
+/*--- including no-redir ones.                             ---*/
+/*---                                                      ---*/
+/*------------------------------------------------------------*/
+
+/*----------------------------------------------------*/
+/*--- Entry and preamble (set everything up)       ---*/
+/*----------------------------------------------------*/
+
+/* signature:
+void VG_(disp_run_translations)( UWord* two_words,
+                                 void*  guest_state, 
+                                 Addr   host_addr );
+*/
+.text
+.globl VG_(disp_run_translations)
+.type  VG_(disp_run_translations), @function
+VG_(disp_run_translations):
+        /* 0(%esp) holds our return address. */
+	/* 4(%esp) holds two_words */
+	/* 8(%esp) holds guest_state */
+	/* 12(%esp) holds host_addr */
+
+        /* The preamble */
+
+        /* Save integer registers, since this is a pseudo-function. */
+        pushl   %eax
+	pushl	%ebx
+	pushl	%ecx
+	pushl	%edx
+	pushl	%esi
+	pushl	%edi
+	pushl	%ebp
+	
+	/* 28+4(%esp) holds two_words */
+	/* 28+8(%esp) holds guest_state */
+	/* 28+12(%esp) holds host_addr */
+
+        /* Get the host CPU in the state expected by generated code. */
+
+	/* set host FPU control word to the default mode expected 
+           by VEX-generated code.  See comments in libvex.h for
+           more info. */
+	finit
+	pushl	$0x027F
+	fldcw	(%esp)
+	addl	$4, %esp
+	
+	/* set host SSE control word to the default mode expected 
+	   by VEX-generated code. */
+	cmpl	$0, VG_(machine_x86_have_mxcsr)
+	jz	L1
+	pushl	$0x1F80
+	ldmxcsr	(%esp)
+	addl	$4, %esp
+L1:
+	/* set dir flag to known value */
+	cld
+
+	/* Set up the guest state pointer */
+	movl	28+8(%esp), %ebp
+
+        /* and jump into the code cache.  Chained translations in
+           the code cache run, until for whatever reason, they can't
+           continue.  When that happens, the translation in question
+           will jump (or call) to one of the continuation points
+           VG_(cp_...) below. */
+        jmpl    *28+12(%esp)
+	/*NOTREACHED*/
+
+/*----------------------------------------------------*/
+/*--- Postamble and exit.                          ---*/
+/*----------------------------------------------------*/
+
+postamble:
+        /* At this point, %eax and %edx contain two
+           words to be returned to the caller.  %eax
+           holds a TRC value, and %edx optionally may
+           hold another word (for CHAIN_ME exits, the
+           address of the place to patch.) */
+
+	/* We're leaving.  Check that nobody messed with %mxcsr
+           or %fpucw.  We can't mess with %eax or %edx here as they
+	   hold the tentative return values, but any others are OK. */
+#if !defined(ENABLE_INNER)
+        /* This check fails for self-hosting, so skip in that case */
+	pushl	$0
+	fstcw	(%esp)
+	cmpl	$0x027F, (%esp)
+	popl	%esi /* get rid of the word without trashing %eflags */
+	jnz	invariant_violation
+#endif
+	cmpl	$0, VG_(machine_x86_have_mxcsr)
+	jz	L2
+	pushl	$0
+	stmxcsr	(%esp)
+	andl	$0xFFFFFFC0, (%esp)  /* mask out status flags */
+	cmpl	$0x1F80, (%esp)
+	popl	%esi
+	jnz	invariant_violation
+L2:	/* otherwise we're OK */
+	jmp	remove_frame
+invariant_violation:
+	movl	$VG_TRC_INVARIANT_FAILED, %eax
+        movl    $0, %edx
+
+remove_frame:
+        /* Stash return values */
+        movl    28+4(%esp), %edi        /* two_words */
+        movl    %eax, 0(%edi)
+        movl    %edx, 4(%edi)
+        /* Restore int regs and return. */
+	popl	%ebp
+	popl	%edi
+	popl	%esi
+	popl	%edx
+	popl	%ecx
+	popl	%ebx
+	popl	%eax
+	ret	
+        
+/*----------------------------------------------------*/
+/*--- Continuation points                          ---*/
+/*----------------------------------------------------*/
+
+/* ------ Chain me to slow entry point ------ */
+.global VG_(disp_cp_chain_me_to_slowEP)
+VG_(disp_cp_chain_me_to_slowEP):
+        /* We got called.  The return address indicates
+           where the patching needs to happen.  Collect
+           the return address and, exit back to C land,
+           handing the caller the pair (Chain_me_S, RA) */
+        movl    $VG_TRC_CHAIN_ME_TO_SLOW_EP, %eax
+        popl    %edx
+        /* 5 = movl $VG_(disp_chain_me_to_slowEP), %edx;
+           2 = call *%edx */
+        subl    $5+2, %edx
+        jmp     postamble
+
+/* ------ Chain me to fast entry point ------ */
+.global VG_(disp_cp_chain_me_to_fastEP)
+VG_(disp_cp_chain_me_to_fastEP):
+        /* We got called.  The return address indicates
+           where the patching needs to happen.  Collect
+           the return address and, exit back to C land,
+           handing the caller the pair (Chain_me_F, RA) */
+        movl    $VG_TRC_CHAIN_ME_TO_FAST_EP, %eax
+        popl    %edx
+        /* 5 = movl $VG_(disp_chain_me_to_fastEP), %edx;
+           2 = call *%edx */
+        subl    $5+2, %edx
+        jmp     postamble
+
+/* ------ Indirect but boring jump ------ */
+.global VG_(disp_cp_xindir)
+VG_(disp_cp_xindir):
+	/* Where are we going? */
+	movl	OFFSET_x86_EIP(%ebp), %eax
+
+        /* stats only */
+        addl    $1, VG_(stats__n_xindirs_32)
+        
+        /* try a fast lookup in the translation cache */
+        movl    %eax, %ebx                      /* next guest addr */
+        andl    $VG_TT_FAST_MASK, %ebx          /* entry# */
+        movl    0+VG_(tt_fast)(,%ebx,8), %esi   /* .guest */
+        movl    4+VG_(tt_fast)(,%ebx,8), %edi   /* .host */
+        cmpl    %eax, %esi
+        jnz     fast_lookup_failed
+
+        /* Found a match.  Jump to .host. */
+	jmp 	*%edi
+	ud2	/* persuade insn decoders not to speculate past here */
+
+fast_lookup_failed:
+        /* stats only */
+        addl    $1, VG_(stats__n_xindir_misses_32)
+
+	movl	$VG_TRC_INNER_FASTMISS, %eax
+        movl    $0, %edx
+	jmp	postamble
+
+/* ------ Assisted jump ------ */
+.global VG_(disp_cp_xassisted)
+VG_(disp_cp_xassisted):
+        /* %ebp contains the TRC */
+        movl    %ebp, %eax
+        movl    $0, %edx
+        jmp     postamble
+
+/* ------ Event check failed ------ */
+.global VG_(disp_cp_evcheck_fail)
+VG_(disp_cp_evcheck_fail):
+       	movl	$VG_TRC_INNER_COUNTERZERO, %eax
+        movl    $0, %edx
+	jmp	postamble
+
+
+.size VG_(disp_run_translations), .-VG_(disp_run_translations)
+
+#endif // defined(VGP_x86_solaris)
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_gdbserver/m_gdbserver.c b/coregrind/m_gdbserver/m_gdbserver.c
index 263bd4a..fba0a2b 100644
--- a/coregrind/m_gdbserver/m_gdbserver.c
+++ b/coregrind/m_gdbserver/m_gdbserver.c
@@ -828,6 +828,7 @@
 
 static void give_control_back_to_vgdb(void)
 {
+#if !defined(VGO_solaris)
    /* cause a SIGSTOP to be sent to ourself, so that vgdb takes control.
       vgdb will then restore the stack so as to resume the activity
       before the ptrace (typically do_syscall_WRK). */
@@ -842,6 +843,15 @@
               "vgdb did not took control. Did you kill vgdb ?\n"
               "busy %d vgdb_interrupted_tid %d\n",
               busy, vgdb_interrupted_tid);
+#else /* defined(VGO_solaris) */
+   /* On Solaris, this code is run within the context of an agent thread
+      (see vgdb-invoker-solaris.c and "PCAGENT" control message in
+      proc(4)). Exit the agent thread now.
+    */
+   SysRes sres = VG_(do_syscall0)(SYS_lwp_exit);
+   if (sr_isError(sres))
+      vg_assert2(0, "Could not exit the agent thread\n");
+#endif /* !defined(VGO_solaris) */
 }
 
 /* Using ptrace calls, vgdb will force an invocation of gdbserver.
diff --git a/coregrind/m_initimg/initimg-solaris.c b/coregrind/m_initimg/initimg-solaris.c
new file mode 100644
index 0000000..4db11a8
--- /dev/null
+++ b/coregrind/m_initimg/initimg-solaris.c
@@ -0,0 +1,1007 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Startup: create initial process image on Solaris             ---*/
+/*---                                            initimg-solaris.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2011-2014 Petr Pavlu
+      setup@dagobah.cz
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGO_solaris)
+
+/* Note: This file is based on initimg-linux.c. */
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcfile.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_mallocfree.h"
+#include "pub_core_machine.h"
+#include "pub_core_ume.h"
+#include "pub_core_options.h"
+#include "pub_core_tooliface.h"       /* VG_TRACK */
+#include "pub_core_threadstate.h"     /* ThreadArchState */
+#include "priv_initimg_pathscan.h"
+#include "pub_core_initimg.h"         /* self */
+
+
+/*====================================================================*/
+/*=== Loading the client                                           ===*/
+/*====================================================================*/
+
+/* Load the client whose name is VG_(argv_the_exename). */
+static void load_client(/*OUT*/ExeInfo *info,
+                        /*OUT*/HChar *out_exe_name, SizeT out_exe_name_size)
+{
+   const HChar *exe_name;
+   Int ret;
+   SysRes res;
+
+   vg_assert(VG_(args_the_exename));
+   exe_name = ML_(find_executable)(VG_(args_the_exename));
+
+   if (!exe_name) {
+      VG_(printf)("valgrind: %s: command not found\n", VG_(args_the_exename));
+      /* Return POSIX's NOTFOUND. */
+      VG_(exit)(127);
+      /*NOTREACHED*/
+   }
+
+   VG_(memset)(info, 0, sizeof(*info));
+   ret = VG_(do_exec)(exe_name, info);
+   if (ret < 0) {
+      VG_(printf)("valgrind: could not execute '%s'\n", exe_name);
+      VG_(exit)(1);
+      /*NOTREACHED*/
+   }
+
+   /* The client was successfully loaded!  Continue. */
+
+   /* Save resolved exename. */
+   if (VG_(strlen)(exe_name) + 1 > out_exe_name_size) {
+      /* This should not really happen. */
+      VG_(printf)("valgrind: execname %s is too long\n", exe_name);
+      VG_(exit)(1);
+      /*NOTREACHED*/
+   }
+   VG_(strcpy)(out_exe_name, exe_name);
+
+   /* Get hold of a file descriptor which refers to the client executable.
+      This is needed for attaching to GDB. */
+   res = VG_(open)(exe_name, VKI_O_RDONLY, VKI_S_IRUSR);
+   if (!sr_isError(res))
+      VG_(cl_exec_fd) = sr_Res(res);
+
+   /* Set initial brk values. */
+   VG_(brk_base) = VG_(brk_limit) = info->brkbase;
+}
+
+
+/*====================================================================*/
+/*=== Setting up the client's environment                          ===*/
+/*====================================================================*/
+
+/* Prepare the client's environment.  This is basically a copy of our
+   environment, except:
+
+     LD_PRELOAD=$VALGRIND_LIB/vgpreload_core-PLATFORM.so:
+                ($VALGRIND_LIB/vgpreload_TOOL-PLATFORM.so:)?
+                $LD_PRELOAD
+
+   If this is missing, then it is added.
+
+   Also, remove any binding for VALGRIND_LAUNCHER=.  The client should not be
+   able to see this.
+
+   If this needs to handle any more variables it should be hacked into
+   something table driven.  The copy is VG_(malloc)'d space.
+*/
+static HChar **setup_client_env(HChar **origenv, const HChar *toolname)
+{
+   const HChar *ld_preload = "LD_PRELOAD=";
+   SizeT ld_preload_len = VG_(strlen)(ld_preload);
+   Bool ld_preload_done = False;
+   SizeT vglib_len = VG_(strlen)(VG_(libdir));
+
+   HChar **cpp;
+   HChar **ret;
+   HChar *preload_tool_path;
+   SizeT envc, i;
+
+   /* Alloc space for the
+        <path>/vgpreload_core-<platform>.so and
+        <path>/vgpreload_<tool>-<platform>.so
+      paths.  We might not need the space for the tool path, but it doesn't
+      hurt to over-allocate briefly.  */
+   SizeT preload_core_path_size = vglib_len + sizeof("/vgpreload_core-") - 1
+                                            + sizeof(VG_PLATFORM) - 1
+                                            + sizeof(".so");
+   SizeT preload_tool_path_size = vglib_len + sizeof("/vgpreload_") - 1
+                                            + VG_(strlen)(toolname) + 1 /*-*/
+                                            + sizeof(VG_PLATFORM) - 1
+                                            + sizeof(".so");
+   SizeT preload_string_size = preload_core_path_size
+                               + preload_tool_path_size;
+   HChar *preload_string = VG_(malloc)("initimg-solaris.sce.1",
+                                       preload_string_size);
+
+   /* Check that the parameters are sane. */
+   vg_assert(origenv);
+   vg_assert(toolname);
+
+   /* Determine if there's a vgpreload_<tool>-<platform>.so file, and setup
+      preload_string. */
+   preload_tool_path = VG_(malloc)("initimg-solaris.sce.2",
+                                   preload_tool_path_size);
+   VG_(sprintf)(preload_tool_path, "%s/vgpreload_%s-%s.so", VG_(libdir),
+                toolname, VG_PLATFORM);
+   if (!VG_(access)(preload_tool_path, True/*r*/, False/*w*/, False/*x*/)) {
+      /* The tool's .so exists, put it into LD_PRELOAD with the core's so. */
+      VG_(sprintf)(preload_string, "%s/vgpreload_core-%s.so:%s", VG_(libdir),
+                   VG_PLATFORM, preload_tool_path);
+   }
+   else {
+      /* The tool's .so doesn't exist, put only the core's .so into
+         LD_PRELOAD. */
+      VG_(sprintf)(preload_string, "%s/vgpreload_core-%s.so", VG_(libdir),
+                   VG_PLATFORM);
+   }
+   VG_(free)(preload_tool_path);
+
+   VG_(debugLog)(2, "initimg", "preload_string:\n");
+   VG_(debugLog)(2, "initimg", "  \"%s\"\n", preload_string);
+
+   /* Count the original size of the env. */
+   envc = 0;
+   for (cpp = origenv; *cpp; cpp++)
+      envc++;
+
+   /* Allocate a new space, envc + 1 new entry + NULL. */
+   ret = VG_(malloc)("initimg-solaris.sce.3", sizeof(HChar*) * (envc + 1 + 1));
+
+   /* Copy it over. */
+   for (cpp = ret; *origenv; )
+      *cpp++ = *origenv++;
+   *cpp = NULL;
+
+   vg_assert(envc == cpp - ret);
+
+   /* Walk over the new environment, mashing as we go. */
+   for (cpp = ret; *cpp; cpp++) {
+      if (VG_(memcmp)(*cpp, ld_preload, ld_preload_len))
+         continue;
+
+      /* LD_PRELOAD entry found, smash it. */
+      SizeT size = VG_(strlen)(*cpp) + 1 /*:*/
+                                     + preload_string_size;
+      HChar *cp = VG_(malloc)("initimg-solaris.sce.4", size);
+
+      VG_(sprintf)(cp, "%s%s:%s", ld_preload, preload_string,
+                   (*cpp) + ld_preload_len);
+      *cpp = cp;
+
+      ld_preload_done = True;
+   }
+
+   /* Add the missing bits. */
+   if (!ld_preload_done) {
+      SizeT size = ld_preload_len + preload_string_size;
+      HChar *cp = VG_(malloc)("initimg-solaris.sce.5", size);
+
+      VG_(sprintf)(cp, "%s%s", ld_preload, preload_string);
+      ret[envc++] = cp;
+   }
+
+   /* We've got ret[0 .. envc-1] live now. */
+
+   /* Find and remove a binding for VALGRIND_LAUNCHER. */
+   {
+      const HChar *v_launcher = VALGRIND_LAUNCHER "=";
+      SizeT v_launcher_len = VG_(strlen)(v_launcher);
+
+      for (i = 0; i < envc; i++)
+         if (!VG_(memcmp)(ret[i], v_launcher, v_launcher_len)) {
+            /* VALGRIND_LAUNCHER was found. */
+            break;
+         }
+
+      if (i < envc) {
+         /* VALGRIND_LAUNCHER was found, remove it. */
+         for (; i < envc - 1; i++)
+            ret[i] = ret[i + 1];
+         envc--;
+      }
+   }
+
+   VG_(free)(preload_string);
+   ret[envc] = NULL;
+
+   return ret;
+}
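
As a concrete illustration (all values hypothetical): with VG_(libdir) set to
/usr/local/lib/valgrind, toolname "memcheck" and VG_PLATFORM "x86-solaris", a
client that started with LD_PRELOAD=libfoo.so would end up seeing

   LD_PRELOAD=/usr/local/lib/valgrind/vgpreload_core-x86-solaris.so:/usr/local/lib/valgrind/vgpreload_memcheck-x86-solaris.so:libfoo.so

while the VALGRIND_LAUNCHER binding is removed from its environment entirely.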
+
+
+/*====================================================================*/
+/*=== Setting up the client's stack                                ===*/
+/*====================================================================*/
+
+/* Add a string onto the string table, and return its address. */
+static HChar *copy_str(HChar **tab, const HChar *str)
+{
+   HChar *cp = *tab;
+   HChar *orig = cp;
+
+   while (*str)
+      *cp++ = *str++;
+   *cp++ = '\0';
+
+   *tab = cp;
+
+   return orig;
+}
+
+
+/* This sets up the client's initial stack, containing the args,
+   environment and aux vector.
+
+   The format of the stack is:
+
+   higher address +-----------------+ <- clstack_end
+                  |                 |
+                  : string table    :
+                  |                 |
+                  +-----------------+
+                  | AT_NULL         |
+                  -                 -
+                  | auxv            |
+                  +-----------------+
+                  | NULL            |
+                  -                 -
+                  | envp            |
+                  +-----------------+
+                  | NULL            |
+                  -                 -
+                  | argv            |
+                  +-----------------+
+                  | argc            |
+   lower address  +-----------------+ <- sp
+                  | undefined       |
+                  :                 :
+
+   Allocate and create the initial client stack.  It is allocated down from
+   clstack_end, which was previously determined by the address space manager.
+   The returned value is the SP value for the client.
+
+   Note that no aux vector is created by the kernel on Solaris if the program
+   statically linked (which is our case).  That means we have to build auxv
+   from scratch. */
+
+static Addr setup_client_stack(void *init_sp,
+                               HChar **orig_envp,
+                               const ExeInfo *info,
+                               Addr clstack_end,
+                               SizeT clstack_max_size,
+                               const HChar *resolved_exe_name)
+{
+   SysRes res;
+   HChar **cpp;
+   HChar *strtab;       /* string table */
+   HChar *stringbase;
+   Addr *ptr;
+   vki_auxv_t *auxv;
+   SizeT stringsize;    /* total size of strings in bytes */
+   SizeT auxsize;       /* total size of auxv in bytes */
+   Int argc;            /* total argc */
+   Int envc;            /* total number of env vars */
+   SizeT stacksize;     /* total client stack size */
+   Addr client_SP;      /* client stack base (initial SP) */
+   Addr clstack_start;
+   Int i;
+
+   vg_assert(VG_IS_PAGE_ALIGNED(clstack_end + 1));
+   vg_assert(VG_(args_the_exename));
+   vg_assert(VG_(args_for_client));
+
+   /* ==================== compute sizes ==================== */
+
+   /* First of all, work out how big the client stack will be. */
+   stringsize = 0;
+
+   /* Paste on the extra args if the loader needs them (i.e. the #!
+      interpreter and its argument). */
+   argc = 0;
+   if (info->interp_name) {
+      argc++;
+      stringsize += VG_(strlen)(info->interp_name) + 1;
+   }
+   if (info->interp_args) {
+      argc++;
+      stringsize += VG_(strlen)(info->interp_args) + 1;
+   }
+
+   /* Now scan the args we're given... */
+   argc++;
+   stringsize += VG_(strlen)(VG_(args_the_exename)) + 1;
+   for (i = 0; i < VG_(sizeXA)(VG_(args_for_client)); i++) {
+      argc++;
+      stringsize += VG_(strlen)(*(HChar**)
+                                  VG_(indexXA)(VG_(args_for_client), i)) + 1;
+   }
+
+   /* ...and the environment. */
+   envc = 0;
+   for (cpp = orig_envp; *cpp; cpp++) {
+      envc++;
+      stringsize += VG_(strlen)(*cpp) + 1;
+   }
+
+   /* Now, how big is the auxv?
+
+      AT_SUN_PLATFORM
+      AT_SUN_EXECNAME
+      AT_PHDR
+      AT_BASE
+      AT_FLAGS
+      AT_PAGESZ
+      AT_SUN_AUXFLAGS
+      AT_SUN_HWCAP
+      AT_NULL
+
+      It would be possible to also add AT_PHENT, AT_PHNUM, AT_ENTRY,
+      AT_SUN_LDDATA, but they don't seem to be so important. */
+   auxsize = 9 * sizeof(*auxv);
+#  if defined(VGA_x86) || defined(VGA_amd64)
+   /* AT_SUN_PLATFORM string. */
+   stringsize += VG_(strlen)("i86pc") + 1;
+#  else
+#    error "Unknown architecture"
+#  endif
+   /* AT_SUN_EXECNAME string. */
+   stringsize += VG_(strlen)(resolved_exe_name) + 1;
+
+   /* Calculate how big the client stack is. */
+   stacksize =
+      sizeof(Word) +                            /* argc */
+      sizeof(HChar**) +                         /* argv[0] == exename */
+      sizeof(HChar**) * argc +                  /* argv */
+      sizeof(HChar**) +                         /* terminal NULL */
+      sizeof(HChar**) * envc +                  /* envp */
+      sizeof(HChar**) +                         /* terminal NULL */
+      auxsize +                                 /* auxv */
+      VG_ROUNDUP(stringsize, sizeof(Word));     /* strings (aligned) */
+
+   /* The variable client_SP is the client's stack pointer. */
+   client_SP = clstack_end - stacksize;
+   client_SP = VG_ROUNDDN(client_SP, 16); /* Make stack 16 byte aligned. */
+
+   /* Calculate base of the string table (aligned). */
+   stringbase = (HChar*)clstack_end - VG_ROUNDUP(stringsize, sizeof(Int));
+   strtab = stringbase;
+
+   clstack_start = VG_PGROUNDDN(client_SP);
+
+   /* Calculate the max stack size. */
+   clstack_max_size = VG_PGROUNDUP(clstack_max_size);
+
+   /* Record stack extent -- needed for stack-change code. */
+   VG_(clstk_start_base) = clstack_start;
+   VG_(clstk_end) = clstack_end;
+   VG_(clstk_max_size) = clstack_max_size;
+
+   if (0)
+      VG_(printf)("stringsize=%lu, auxsize=%lu, stacksize=%lu, maxsize=%#lx\n"
+                  "clstack_start %#lx\n"
+                  "clstack_end   %#lx\n",
+                  stringsize, auxsize, stacksize, clstack_max_size,
+                  clstack_start, clstack_end);
+
+   /* ==================== allocate space ==================== */
+
+   {
+      SizeT anon_size = clstack_end - clstack_start + 1;
+      SizeT resvn_size = clstack_max_size - anon_size;
+      Addr anon_start = clstack_start;
+      Addr resvn_start = anon_start - resvn_size;
+      SizeT inner_HACK = 0;
+      Bool ok;
+
+      /* So far we've only accounted for space requirements down to the stack
+         pointer.  If this target's ABI requires a redzone below the stack
+         pointer, we need to allocate an extra page, to handle the worst case
+         in which the stack pointer is almost at the bottom of a page, and so
+         there is insufficient room left over to put the redzone in.  In this
+         case the simple thing to do is allocate an extra page, by shrinking
+         the reservation by one page and growing the anonymous area by a
+         corresponding page. */
+      vg_assert(VG_STACK_REDZONE_SZB >= 0);
+      vg_assert(VG_STACK_REDZONE_SZB < VKI_PAGE_SIZE);
+      if (VG_STACK_REDZONE_SZB > 0) {
+         vg_assert(resvn_size > VKI_PAGE_SIZE);
+         resvn_size -= VKI_PAGE_SIZE;
+         anon_start -= VKI_PAGE_SIZE;
+         anon_size += VKI_PAGE_SIZE;
+      }
+
+      vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
+      vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
+      vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
+      vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
+      vg_assert(resvn_start == clstack_end + 1 - clstack_max_size);
+
+#     ifdef ENABLE_INNER
+      /* Create 1M non-fault-extending stack. */
+      inner_HACK = 1024 * 1024;
+#     endif
+
+      if (0)
+         VG_(printf)("resvn_start=%#lx, resvn_size=%#lx\n"
+                     "anon_start=%#lx, anon_size=%#lx\n",
+                     resvn_start, resvn_size, anon_start, anon_size);
+
+      /* Create a shrinkable reservation followed by an anonymous segment.
+         Together these constitute a growdown stack. */
+      ok = VG_(am_create_reservation)(resvn_start,
+                                      resvn_size - inner_HACK,
+                                      SmUpper,
+                                      anon_size + inner_HACK);
+      if (ok) {
+         /* Allocate a stack - mmap enough space for the stack. */
+         res = VG_(am_mmap_anon_fixed_client)(anon_start - inner_HACK,
+                                              anon_size + inner_HACK,
+                                              info->stack_prot);
+      }
+      if (!ok || sr_isError(res)) {
+         /* Allocation of the stack failed.  We have to stop. */
+         VG_(printf)("valgrind: "
+                     "I failed to allocate space for the application's stack.\n");
+         VG_(printf)("valgrind: "
+                     "This may be the result of a very large --main-stacksize=\n");
+         VG_(printf)("valgrind: setting.  Cannot continue.  Sorry.\n\n");
+         VG_(exit)(1);
+         /*NOTREACHED*/
+      }
+   }
+
+   /* ==================== create client stack ==================== */
+
+   ptr = (Addr*)client_SP;
+
+   /* Copy-out client argc. */
+   *ptr++ = argc;
+
+   /* Copy-out client argv. */
+   if (info->interp_name) {
+      *ptr++ = (Addr)copy_str(&strtab, info->interp_name);
+      VG_(free)(info->interp_name);
+   }
+   if (info->interp_args) {
+      *ptr++ = (Addr)copy_str(&strtab, info->interp_args);
+      VG_(free)(info->interp_args);
+   }
+
+   *ptr++ = (Addr)copy_str(&strtab, VG_(args_the_exename));
+   for (i = 0; i < VG_(sizeXA)(VG_(args_for_client)); i++)
+      *ptr++ = (Addr)copy_str(
+                  &strtab, *(HChar**) VG_(indexXA)(VG_(args_for_client), i));
+   *ptr++ = 0;
+
+   /* Copy-out envp. */
+   VG_(client_envp) = (HChar**)ptr;
+   for (cpp = orig_envp; *cpp; ptr++, cpp++)
+      *ptr = (Addr)copy_str(&strtab, *cpp);
+   *ptr++ = 0;
+
+   /* Create aux vector. */
+   auxv = (vki_auxv_t*)ptr;
+   VG_(client_auxv) = (UWord*)ptr;
+
+   /* AT_SUN_PLATFORM */
+   auxv->a_type = VKI_AT_SUN_PLATFORM;
+#  if defined(VGA_x86) || defined(VGA_amd64)
+   auxv->a_un.a_ptr = copy_str(&strtab, "i86pc");
+#  else
+#    error "Unknown architecture"
+#  endif
+   auxv++;
+
+   /* AT_SUN_EXECNAME */
+   auxv->a_type = VKI_AT_SUN_EXECNAME;
+   auxv->a_un.a_ptr = copy_str(&strtab, resolved_exe_name);
+   auxv++;
+
+   /* AT_PHDR */
+   if (info->phdr) {
+      auxv->a_type = VKI_AT_PHDR;
+      auxv->a_un.a_val = info->phdr;
+      auxv++;
+   }
+
+   /* AT_BASE */
+   auxv->a_type = VKI_AT_BASE;
+   auxv->a_un.a_val = info->interp_offset;
+   auxv++;
+
+   /* AT_FLAGS */
+   auxv->a_type = VKI_AT_FLAGS;
+#  if defined(VGA_x86) || defined(VGA_amd64)
+   auxv->a_un.a_val = 0; /* 0 on i86pc */
+#  else
+#    error "Unknown architecture"
+#  endif
+   auxv++;
+
+   /* AT_PAGESZ */
+   auxv->a_type = VKI_AT_PAGESZ;
+   auxv->a_un.a_val = VKI_PAGE_SIZE;
+   auxv++;
+
+   /* AT_SUN_AUXFLAGS */
+   auxv->a_type = VKI_AT_SUN_AUXFLAGS;
+   /* XXX Handle AF_SUN_SETUGID? */
+   auxv->a_un.a_val = VKI_AF_SUN_HWCAPVERIFY;
+   auxv++;
+
+   /* AT_SUN_HWCAP */
+   {
+      VexArch vex_arch;
+      VexArchInfo vex_archinfo;
+      UInt hwcaps;
+
+      VG_(machine_get_VexArchInfo)(&vex_arch, &vex_archinfo);
+
+#     if defined(VGA_x86)
+      vg_assert(vex_arch == VexArchX86);
+
+      /* Set default hwcaps. */
+      hwcaps =
+           VKI_AV_386_FPU       /* x87-style floating point */
+         | VKI_AV_386_TSC       /* rdtsc insn */
+         | VKI_AV_386_CX8       /* cmpxchg8b insn */
+         | VKI_AV_386_SEP       /* sysenter and sysexit */
+         | VKI_AV_386_AMD_SYSC  /* AMD's syscall and sysret */
+         | VKI_AV_386_CMOV      /* conditional move insns */
+         | VKI_AV_386_MMX       /* MMX insn */
+         | VKI_AV_386_AHF;      /* lahf/sahf insns */
+
+      /* Handle additional hwcaps. */
+      if (vex_archinfo.hwcaps & VEX_HWCAPS_X86_SSE1)
+         hwcaps |=
+              VKI_AV_386_FXSR   /* fxsave and fxrstor */
+            | VKI_AV_386_SSE;   /* SSE insns and regs  */
+      if (vex_archinfo.hwcaps & VEX_HWCAPS_X86_SSE2) {
+         vg_assert(vex_archinfo.hwcaps & VEX_HWCAPS_X86_SSE1);
+         hwcaps |=
+              VKI_AV_386_SSE2;  /* SSE2 insns and regs */
+      }
+      if (vex_archinfo.hwcaps & VEX_HWCAPS_X86_SSE3) {
+         vg_assert(vex_archinfo.hwcaps & VEX_HWCAPS_X86_SSE2);
+         hwcaps |=
+              VKI_AV_386_SSE3   /* SSE3 insns and regs */
+            | VKI_AV_386_SSSE3; /* Intel SSSE3 insns */
+      }
+      if (vex_archinfo.hwcaps & VEX_HWCAPS_X86_LZCNT)
+         hwcaps |=
+              VKI_AV_386_AMD_LZCNT; /* AMD's LZCNT insn */
+
+      /* No support for:
+         AV_386_AMD_MMX         AMD's MMX insns
+         AV_386_AMD_3DNow       AMD's 3Dnow! insns
+         AV_386_AMD_3DNowx      AMD's 3Dnow! extended insns
+         AV_386_CX16            cmpxchg16b insn
+         AV_386_TSCP            rdtscp instruction
+         AV_386_AMD_SSE4A       AMD's SSE4A insns
+         AV_386_POPCNT          POPCNT insn
+         AV_386_SSE4_1          Intel SSE4.1 insns
+         AV_386_SSE4_2          Intel SSE4.2 insns
+         AV_386_MOVBE           Intel MOVBE insns
+         AV_386_AES             Intel AES insns
+         AV_386_PCLMULQDQ       Intel PCLMULQDQ insn
+         AV_386_XSAVE           Intel XSAVE/XRSTOR insns
+         AV_386_AVX             Intel AVX insns
+         illumos only:
+            AV_386_VMX          Intel VMX support
+            AV_386_AMD_SVM      AMD SVM support
+         solaris only:
+            AV_386_AMD_XOP      AMD XOP insns
+            AV_386_AMD_FMA4     AMD FMA4 insns */
+
+#     elif defined(VGA_amd64)
+      vg_assert(vex_arch == VexArchAMD64);
+
+      /* Set default hwcaps. */
+      hwcaps =
+           VKI_AV_386_FPU       /* x87-style floating point */
+         | VKI_AV_386_TSC       /* rdtsc insn */
+         | VKI_AV_386_CX8       /* cmpxchg8b insn */
+         | VKI_AV_386_AMD_SYSC  /* AMD's syscall and sysret */
+         | VKI_AV_386_CMOV      /* conditional move insns */
+         | VKI_AV_386_MMX       /* MMX insn */
+         | VKI_AV_386_AHF       /* lahf/sahf insns */
+         | VKI_AV_386_FXSR      /* fxsave and fxrstor */
+         | VKI_AV_386_SSE       /* SSE insns and regs  */
+         | VKI_AV_386_SSE2;     /* SSE2 insns and regs */
+
+      /* Handle additional hwcaps. */
+      if (vex_archinfo.hwcaps & VEX_HWCAPS_AMD64_SSE3)
+         hwcaps |=
+              VKI_AV_386_SSE3   /* SSE3 insns and regs */
+            | VKI_AV_386_SSSE3; /* Intel SSSE3 insns */
+      if (vex_archinfo.hwcaps & VEX_HWCAPS_AMD64_CX16)
+         hwcaps |=
+              VKI_AV_386_CX16;  /* cmpxchg16b insn */
+      if (vex_archinfo.hwcaps & VEX_HWCAPS_AMD64_LZCNT)
+         hwcaps |=
+              VKI_AV_386_AMD_LZCNT; /* AMD's LZCNT insn */
+      if (vex_archinfo.hwcaps & VEX_HWCAPS_AMD64_RDTSCP)
+         hwcaps |=
+              VKI_AV_386_TSCP;  /* rdtscp instruction */
+      if ((vex_archinfo.hwcaps & VEX_HWCAPS_AMD64_SSE3) &&
+          (vex_archinfo.hwcaps & VEX_HWCAPS_AMD64_CX16)) {
+         /* The CPUID simulation provided by VEX claims to have POPCNT, AES
+            and SSE4 (SSE4.1/SSE4.2) in the SSE3+CX16 configuration. */
+         hwcaps |=
+              VKI_AV_386_POPCNT /* POPCNT insn */
+            | VKI_AV_386_AES    /* Intel AES insns */
+            | VKI_AV_386_SSE4_1 /* Intel SSE4.1 insns */
+            | VKI_AV_386_SSE4_2; /* Intel SSE4.2 insns */
+      }
+      if ((vex_archinfo.hwcaps & VEX_HWCAPS_AMD64_SSE3) &&
+          (vex_archinfo.hwcaps & VEX_HWCAPS_AMD64_CX16) &&
+          (vex_archinfo.hwcaps & VEX_HWCAPS_AMD64_AVX)) {
+         /* The CPUID simulation provided by VEX claims to have PCLMULQDQ and
+            XSAVE in the SSE3+CX16+AVX configuration. */
+         hwcaps |=
+              VKI_AV_386_PCLMULQDQ /* Intel PCLMULQDQ insn */
+            | VKI_AV_386_XSAVE; /* Intel XSAVE/XRSTOR insns */
+      }
+      /* No support for:
+         AV_386_SEP             sysenter and sysexit
+         AV_386_AMD_MMX         AMD's MMX insns
+         AV_386_AMD_3DNow       AMD's 3Dnow! insns
+         AV_386_AMD_3DNowx      AMD's 3Dnow! extended insns
+         AV_386_AMD_SSE4A       AMD's SSE4A insns
+         AV_386_MOVBE           Intel MOVBE insns
+         AV_386_AVX             Intel AVX insns
+         illumos only:
+            AV_386_VMX          Intel VMX support
+            AV_386_AMD_SVM      AMD SVM support
+         solaris only:
+            AV_386_AMD_XOP      AMD XOP insns
+            AV_386_AMD_FMA4     AMD FMA4 insns
+
+         TODO VEX supports AVX, BMI and AVX2. Investigate if they can be
+         enabled on Solaris/illumos.
+       */
+
+#     else
+#       error "Unknown architecture"
+#     endif
+
+      auxv->a_type = VKI_AT_SUN_HWCAP;
+      auxv->a_un.a_val = hwcaps;
+      auxv++;
+   }
+
+   /* AT_SUN_HWCAP2 */
+   {
+      /* No support for:
+         illumos only:
+            AV_386_2_F16C       F16C half precision extensions
+            AV_386_2_RDRAND     RDRAND insn
+         solaris only:
+            AV2_386_RDRAND      Intel RDRAND insns
+            AV2_386_FMA         Intel FMA insn
+            AV2_386_F16C        IEEE half precn(float) insn
+            AV2_386_AMD_TBM     AMD TBM insn
+            AV2_386_BMI1        Intel BMI1 insn
+            AV2_386_FSGSBASE    Intel RD/WR FS/GSBASE insn
+            AV2_386_AVX2        Intel AVX2 insns
+            AV2_386_BMI2        Intel BMI2 insns
+            AV2_386_HLE         Intel HLE insns
+            AV2_386_RTM         Intel RTM insns
+            AV2_386_EFS         Intel Enhanced Fast String
+            AV2_386_RDSEED      Intel RDSEED insn
+            AV2_386_ADX         Intel ADX insns
+            AV2_386_PRFCHW      Intel PREFETCHW hint
+       */
+   }
+
+   /* AT_NULL */
+   auxv->a_type = VKI_AT_NULL;
+   auxv->a_un.a_val = 0;
+
+   vg_assert(strtab - stringbase == stringsize);
+
+   /* The variable client_SP is now pointing at client's argc/argv. */
+
+   if (0)
+      VG_(printf)("startup SP = %#lx\n", client_SP);
+   return client_SP;
+}
+
+
+/* Allocate the client data segment. It is an expandable anonymous mapping
+   abutting a 1-page reservation. The data segment starts at VG_(brk_base)
+   and runs up to VG_(brk_limit). Neither of these two values has to be
+   page-aligned.
+   The reservation segment is used to prevent the data segment from merging
+   with a pre-existing segment. This is normally not an issue because the
+   address space manager satisfies requests for client address space from the
+   highest available addresses. However, when memory is low, the data segment
+   can run into mmap'ed objects, and the reservation segment keeps them apart.
+   The page that contains VG_(brk_base) is already allocated by the program's
+   loaded data segment. The brk syscall wrapper handles this special case.
+   See the brk syscall wrapper for more information. */
+static void setup_client_dataseg(SizeT initial_size)
+{
+   Bool ok;
+   SysRes sres;
+   Addr anon_start = VG_PGROUNDUP(VG_(brk_base));
+   SizeT anon_size = VG_PGROUNDUP(initial_size);
+   Addr resvn_start = anon_start + anon_size;
+   SizeT resvn_size = VKI_PAGE_SIZE;
+   const NSegment *seg;
+   UInt prot;
+
+   vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
+   vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
+   vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
+   vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
+
+   /* Stay sane (because there's been no brk activity yet). */
+   vg_assert(VG_(brk_base) == VG_(brk_limit));
+
+   /* Find the loaded data segment and remember its protection. */
+   seg = VG_(am_find_nsegment)(VG_(brk_base) - 1);
+   vg_assert(seg);
+   prot = (seg->hasR ? VKI_PROT_READ : 0)
+        | (seg->hasW ? VKI_PROT_WRITE : 0)
+        | (seg->hasX ? VKI_PROT_EXEC : 0);
+
+   /* Try to create the data segment and associated reservation where
+      VG_(brk_base) says. */
+   ok = VG_(am_create_reservation)(resvn_start, resvn_size, SmLower, anon_size);
+   if (!ok) {
+      /* That didn't work, we're hosed. */
+      VG_(printf)("valgrind: cannot initialize a brk segment\n");
+      VG_(exit)(1);
+      /*NOTREACHED*/
+   }
+   vg_assert(ok);
+
+   /* Map the data segment. */
+   sres = VG_(am_mmap_anon_fixed_client)(anon_start, anon_size, prot);
+   vg_assert(!sr_isError(sres));
+   vg_assert(sr_Res(sres) == anon_start);
+}
+
+
+/*====================================================================*/
+/*=== TOP-LEVEL: VG_(setup_client_initial_image)                   ===*/
+/*====================================================================*/
+
+/* Create the client's initial memory image. */
+IIFinaliseImageInfo VG_(ii_create_image)(IICreateImageInfo iicii,
+                                         const VexArchInfo *vex_archinfo)
+{
+   ExeInfo info;
+   HChar **env = NULL;
+   HChar resolved_exe_name[VKI_PATH_MAX];
+
+   IIFinaliseImageInfo iifii;
+   VG_(memset)(&iifii, 0, sizeof(iifii));
+
+   //--------------------------------------------------------------
+   // Load client executable, finding in $PATH if necessary
+   //   p: early_process_cmd_line_options()  [for 'exec', 'need_help']
+   //   p: layout_remaining_space            [so there's space]
+   //--------------------------------------------------------------
+   VG_(debugLog)(1, "initimg", "Loading client\n");
+
+   if (!VG_(args_the_exename)) {
+      VG_(err_missing_prog)();
+      /*NOTREACHED*/
+   }
+
+   load_client(&info, resolved_exe_name, sizeof(resolved_exe_name));
+   iifii.initial_client_IP = info.init_ip;
+   /* Note: TOC isn't available on Solaris. */
+   iifii.initial_client_TOC = info.init_toc;
+   iifii.initial_client_TP = info.init_thrptr;
+   /* Note that iifii.client_auxv is never set on Solaris, because it isn't
+      necessary to have this value in VG_(ii_finalise_image). */
+
+   //--------------------------------------------------------------
+   // Set up client's environment
+   //   p: set-libdir                       [for VG_(libdir)]
+   //   p: early_process_cmd_line_options() [for toolname]
+   //--------------------------------------------------------------
+   VG_(debugLog)(1, "initimg", "Setup client env\n");
+   env = setup_client_env(iicii.envp, iicii.toolname);
+
+   //--------------------------------------------------------------
+   // Setup client stack and EIP
+   //   p: load_client()     [for 'info']
+   //   p: fix_environment() [for 'env']
+   //--------------------------------------------------------------
+   {
+      /* When allocating space for the client stack, take notice of the
+         --main-stacksize value.  This makes it possible to run programs with
+         very large (primary) stack requirements simply by specifying
+         --main-stacksize. */
+      /* Logic is as follows:
+         - By default, use the client's current stack rlimit.
+         - If that exceeds 16M, clamp to 16M.
+         - If a larger --main-stacksize value is specified, use that instead.
+         - In all situations, the minimum allowed stack size is 1M.
+      */
+      void *init_sp = iicii.argv - 1;
+      SizeT m1  = 1024 * 1024;
+      SizeT m16 = 16 * m1;
+      SizeT szB = (SizeT)VG_(client_rlimit_stack).rlim_cur;
+      if (szB < m1)
+         szB = m1;
+      if (szB > m16)
+         szB = m16;
+
+      if (VG_(clo_main_stacksize) > 0)
+         szB = VG_(clo_main_stacksize);
+      if (szB < m1)
+         szB = m1;
+
+      szB = VG_PGROUNDUP(szB);
+      VG_(debugLog)(1, "initimg",
+                       "Setup client stack: size will be %ld\n", szB);
+
+      iifii.clstack_max_size = szB;
+      iifii.initial_client_SP = setup_client_stack(init_sp, env, &info,
+                                                   iicii.clstack_end,
+                                                   iifii.clstack_max_size,
+                                                   resolved_exe_name);
+      VG_(free)(env);
+
+      VG_(debugLog)(2, "initimg", "Client info: "
+                       "initial_IP=%#lx, initial_TOC=%#lx, brk_base=%#lx\n",
+                       iifii.initial_client_IP, iifii.initial_client_TOC,
+                       VG_(brk_base));
+      VG_(debugLog)(2, "initimg", "Client info: "
+                       "initial_SP=%#lx, max_stack_size=%lu\n",
+                       iifii.initial_client_SP,
+                       iifii.clstack_max_size);
+   }
+
+   //--------------------------------------------------------------
+   // Set up the client data (brk) segment.  The initial segment is at
+   // least 1 MB and at most 8 MB large and abuts a 1-page reservation.
+   //     p: load_client()     [for 'info' and hence VG_(brk_base)]
+   //--------------------------------------------------------------
+   {
+      SizeT m1 = 1024 * 1024;
+      SizeT m8 = 8 * m1;
+      SizeT dseg_max_size = VG_(client_rlimit_data).rlim_cur;
+      VG_(debugLog)(1, "initimg", "Setup client data (brk) segment at %#lx\n",
+                                  VG_(brk_base));
+      if (dseg_max_size < m1)
+         dseg_max_size = m1;
+      if (dseg_max_size > m8)
+         dseg_max_size = m8;
+      dseg_max_size = VG_PGROUNDUP(dseg_max_size);
+
+      setup_client_dataseg(dseg_max_size);
+   }
+
+   return iifii;
+}
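
To make the stack-size clamping above concrete (numbers hypothetical): with a
stack rlimit of 64 MB and no --main-stacksize, the client stack is limited to
16 MB; with --main-stacksize=64000000 it becomes 64000000 bytes rounded up to
a whole number of pages; and any value below 1 MB is raised to 1 MB.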
+
+
+/*====================================================================*/
+/*=== TOP-LEVEL: VG_(finalise_image)                               ===*/
+/*====================================================================*/
+
+/* Just before starting the client, we may need to make final adjustments to
+   its initial image.  Also we need to set up the VEX guest state for thread 1
+   (the root thread) and copy in essential starting values.  This is handed
+   the IIFinaliseImageInfo created by VG_(ii_create_image).
+*/
+void VG_(ii_finalise_image)(IIFinaliseImageInfo iifii)
+{
+   ThreadArchState *arch = &VG_(threads)[1].arch;
+   const NSegment *seg;
+
+#  if defined(VGA_x86)
+   vg_assert(0 == sizeof(VexGuestX86State) % LibVEX_GUEST_STATE_ALIGN);
+
+   /* Zero out the initial state, and set up the simulated FPU in a sane
+      way. */
+   LibVEX_GuestX86_initialise(&arch->vex);
+
+   /* Zero out the shadow areas. */
+   VG_(memset)(&arch->vex_shadow1, 0, sizeof(VexGuestX86State));
+   VG_(memset)(&arch->vex_shadow2, 0, sizeof(VexGuestX86State));
+
+   /* Put essential stuff into the new state. */
+   arch->vex.guest_ESP = iifii.initial_client_SP;
+   arch->vex.guest_EIP = iifii.initial_client_IP;
+   LibVEX_GuestX86_put_eflags(VKI_PSL_USER, &arch->vex);
+
+   /* Set %cs, %ds, %ss and %es to default values. */
+   __asm__ __volatile__ ("movw %%cs, %[cs]" : [cs] "=m" (arch->vex.guest_CS));
+   __asm__ __volatile__ ("movw %%ds, %[ds]" : [ds] "=m" (arch->vex.guest_DS));
+   __asm__ __volatile__ ("movw %%ss, %[ss]" : [ss] "=m" (arch->vex.guest_SS));
+   __asm__ __volatile__ ("movw %%es, %[es]" : [es] "=m" (arch->vex.guest_ES));
+
+   {
+      /* Initial thread pointer value will be saved in GDT when the thread is
+         started in the syswrap module and a thread's GDT is allocated. */
+      ThreadOSstate *os = &VG_(threads)[1].os_state;
+      os->thrptr = iifii.initial_client_TP;
+   }
+
+#  elif defined(VGA_amd64)
+   vg_assert(0 == sizeof(VexGuestAMD64State) % LibVEX_GUEST_STATE_ALIGN);
+
+   /* Zero out the initial state, and set up the simulated FPU in a sane
+      way. */
+   LibVEX_GuestAMD64_initialise(&arch->vex);
+
+   /* Zero out the shadow areas. */
+   VG_(memset)(&arch->vex_shadow1, 0, sizeof(VexGuestAMD64State));
+   VG_(memset)(&arch->vex_shadow2, 0, sizeof(VexGuestAMD64State));
+
+   /* Put essential stuff into the new state. */
+   arch->vex.guest_RSP = iifii.initial_client_SP;
+   arch->vex.guest_RIP = iifii.initial_client_IP;
+   arch->vex.guest_FS_CONST = iifii.initial_client_TP;
+   LibVEX_GuestAMD64_put_rflags(VKI_PSL_USER, &arch->vex);
+
+#  else
+#    error "Unknown platform"
+#  endif
+
+   /* Tell the tool that we just wrote to the registers. */
+   VG_TRACK(post_reg_write, Vg_CoreStartup, 1/*tid*/, 0/*offset*/,
+            sizeof(VexGuestArchState));
+
+   /* Tell the tool about the client data segment and then kill it which will
+      make it inaccessible/unaddressable. */
+   seg = VG_(am_find_nsegment)(VG_PGROUNDUP(VG_(brk_base)));
+   vg_assert(seg);
+   vg_assert(seg->kind == SkAnonC);
+   VG_TRACK(new_mem_brk, VG_(brk_base), seg->end + 1 - VG_(brk_base),
+            1/*tid*/);
+   VG_TRACK(die_mem_brk, VG_(brk_base), seg->end + 1 - VG_(brk_base));
+}
+
+#endif // defined(VGO_solaris)
+
+/*--------------------------------------------------------------------*/
+/*---                                                              ---*/
+/*--------------------------------------------------------------------*/
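
For reference, a client started from this image finds the aux vector built by
setup_client_stack() immediately after the environment block on its initial
stack.  A minimal client-side sketch of walking it, valid only at process
startup while environ still points at that original block (illustrative, not
part of the patch):

   #include <sys/auxv.h>

   extern char **environ;

   static long find_aux(int type)
   {
      char **p = environ;
      while (*p)                        /* skip past the envp strings... */
         p++;
      auxv_t *av = (auxv_t *)(p + 1);   /* ...and their NULL terminator */
      for (; av->a_type != AT_NULL; av++)
         if (av->a_type == type)
            return av->a_un.a_val;
      return 0;
   }

   /* e.g. (char *)find_aux(AT_SUN_EXECNAME) gives the resolved exe path. */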
diff --git a/coregrind/m_libcassert.c b/coregrind/m_libcassert.c
index 566dd84..0545205 100644
--- a/coregrind/m_libcassert.c
+++ b/coregrind/m_libcassert.c
@@ -49,7 +49,8 @@
    Assertery.
    ------------------------------------------------------------------ */
 
-#if defined(VGP_x86_linux) || defined(VGP_x86_darwin)
+#if defined(VGP_x86_linux) || defined(VGP_x86_darwin) \
+    || defined(VGP_x86_solaris)
 #  define GET_STARTREGS(srP)                              \
       { UInt eip, esp, ebp;                               \
         __asm__ __volatile__(                             \
@@ -65,7 +66,8 @@
         (srP)->r_sp = (ULong)esp;                         \
         (srP)->misc.X86.r_ebp = ebp;                      \
       }
-#elif defined(VGP_amd64_linux) || defined(VGP_amd64_darwin)
+#elif defined(VGP_amd64_linux) || defined(VGP_amd64_darwin) \
+      || defined(VGP_amd64_solaris)
 #  define GET_STARTREGS(srP)                              \
       { ULong rip, rsp, rbp;                              \
         __asm__ __volatile__(                             \
@@ -291,7 +293,7 @@
 {
 #if defined(VGO_linux)
    (void)VG_(do_syscall1)(__NR_exit_group, status );
-#elif defined(VGO_darwin)
+#elif defined(VGO_darwin) || defined(VGO_solaris)
    (void)VG_(do_syscall1)(__NR_exit, status );
 #else
 #  error Unknown OS
@@ -505,8 +507,15 @@
 }
 
 /* Print some helpful-ish text about unimplemented things, and give up. */
-void VG_(unimplemented) ( const HChar* msg )
+void VG_(unimplemented) ( const HChar* format, ... )
 {
+   va_list vargs;
+   HChar msg[256];
+
+   va_start(vargs, format);
+   VG_(vsnprintf)(msg, sizeof(msg), format, vargs);
+   va_end(vargs);
+
    if (VG_(clo_xml))
       VG_(printf_xml)("</valgrindoutput>\n");
    VG_(umsg)("\n");
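
With the printf-style signature above, call sites can include context in the
message.  A hypothetical example (format string and arguments are purely
illustrative, not taken from this patch):

   VG_(unimplemented)("syscall %s (#%ld) not wrapped on this platform",
                      sysname, (Long)sysno);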
diff --git a/coregrind/m_libcfile.c b/coregrind/m_libcfile.c
index ce442ec..8eb9f2c 100644
--- a/coregrind/m_libcfile.c
+++ b/coregrind/m_libcfile.c
@@ -78,7 +78,7 @@
    cannot be deduced. */
 Bool VG_(resolve_filename) ( Int fd, const HChar** result )
 {
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    static HChar *buf = NULL;
    static SizeT  bufsiz = 0;
 
@@ -88,7 +88,13 @@
    }
 
    HChar tmp[64];   // large enough
-   VG_(sprintf)(tmp, "/proc/self/fd/%d", fd);
+   {
+#     if defined(VGO_linux)
+      VG_(sprintf)(tmp, "/proc/self/fd/%d", fd);
+#     elif defined(VGO_solaris)
+      VG_(sprintf)(tmp, "/proc/self/path/%d", fd);
+#     endif
+   }
 
    while (42) {
       SSizeT res = VG_(readlink)(tmp, buf, bufsiz);
@@ -139,6 +145,9 @@
 #  elif defined(VGO_linux) || defined(VGO_darwin)
    SysRes res = VG_(do_syscall3)(__NR_mknod,
                                  (UWord)pathname, mode, dev);
+#  elif defined(VGO_solaris)
+   SysRes res = VG_(do_syscall4)(__NR_mknodat,
+                                 VKI_AT_FDCWD, (UWord)pathname, mode, dev);
 #  else
 #    error Unknown OS
 #  endif
@@ -157,6 +166,9 @@
 #  elif defined(VGO_darwin)
    SysRes res = VG_(do_syscall3)(__NR_open_nocancel,
                                  (UWord)pathname, flags, mode);
+#  elif defined(VGO_solaris)
+   SysRes res = VG_(do_syscall4)(__NR_openat,
+                                 VKI_AT_FDCWD, (UWord)pathname, flags, mode);
 #  else
 #    error Unknown OS
 #  endif
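
The pattern above recurs throughout this file: the Solaris kernel provides
these operations through their *at variants, so the wrappers pass VKI_AT_FDCWD
to obtain the traditional relative-to-cwd behaviour, e.g. openat(AT_FDCWD,
path, flags, mode) for open(), unlinkat(AT_FDCWD, path, 0) for unlink() and
faccessat(AT_FDCWD, path, amode, 0) for access().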
@@ -176,7 +188,7 @@
 void VG_(close) ( Int fd )
 {
    /* Hmm.  Return value is not checked.  That's uncool. */
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    (void)VG_(do_syscall1)(__NR_close, fd);
 #  elif defined(VGO_darwin)
    (void)VG_(do_syscall1)(__NR_close_nocancel, fd);
@@ -188,7 +200,7 @@
 Int VG_(read) ( Int fd, void* buf, Int count)
 {
    Int    ret;
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    SysRes res = VG_(do_syscall3)(__NR_read, fd, (UWord)buf, count);
 #  elif defined(VGO_darwin)
    SysRes res = VG_(do_syscall3)(__NR_read_nocancel, fd, (UWord)buf, count);
@@ -208,7 +220,7 @@
 Int VG_(write) ( Int fd, const void* buf, Int count)
 {
    Int    ret;
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    SysRes res = VG_(do_syscall3)(__NR_write, fd, (UWord)buf, count);
 #  elif defined(VGO_darwin)
    SysRes res = VG_(do_syscall3)(__NR_write_nocancel, fd, (UWord)buf, count);
@@ -252,6 +264,18 @@
       fd[1] = (Int)sr_ResHI(res);
    }
    return sr_isError(res) ? -1 : 0;
+#  elif defined(VGO_solaris)
+#  if defined(SOLARIS_NEW_PIPE_SYSCALL)
+   SysRes res = VG_(do_syscall2)(__NR_pipe, (UWord)fd, 0);
+   return sr_isError(res) ? -1 : 0;
+#  else
+   SysRes res = VG_(do_syscall0)(__NR_pipe);
+   if (!sr_isError(res)) {
+      fd[0] = (Int)sr_Res(res);
+      fd[1] = (Int)sr_ResHI(res);
+   }
+   return sr_isError(res) ? -1 : 0;
+#  endif
 #  else
 #    error "Unknown OS"
 #  endif
@@ -275,6 +299,14 @@
    SysRes res = VG_(do_syscall4)(__NR_lseek, fd, 
                                  offset & 0xffffffff, offset >> 32, whence);
    return sr_isError(res) ? (-1) : sr_Res(res);
+#  elif defined(VGP_x86_solaris)
+   SysRes res = VG_(do_syscall4)(__NR_llseek, fd,
+                                 offset & 0xffffffff, offset >> 32, whence);
+   return sr_isError(res) ? (-1) : ((ULong)sr_ResHI(res) << 32 | sr_Res(res));
+#  elif defined(VGP_amd64_solaris)
+   SysRes res = VG_(do_syscall3)(__NR_lseek, fd, offset, whence);
+   vg_assert(sizeof(Off64T) == sizeof(Word));
+   return sr_isError(res) ? (-1) : sr_Res(res);
 #  else
 #    error "Unknown plat"
 #  endif
@@ -338,7 +370,23 @@
         TRANSLATE_TO_vg_stat(vgbuf, &buf);
      return res;
    }
-
+#  elif defined(VGO_solaris)
+   {
+#     if defined(VGP_x86_solaris)
+      struct vki_stat64 buf64;
+      res = VG_(do_syscall4)(__NR_fstatat64, VKI_AT_FDCWD, (UWord)file_name,
+                             (UWord)&buf64, 0);
+#     elif defined(VGP_amd64_solaris)
+      struct vki_stat buf64;
+      res = VG_(do_syscall4)(__NR_fstatat, VKI_AT_FDCWD, (UWord)file_name,
+                             (UWord)&buf64, 0);
+#     else
+#        error "Unknown platform"
+#     endif
+      if (!sr_isError(res))
+         TRANSLATE_TO_vg_stat(vgbuf, &buf64);
+      return res;
+   }
 #  else
 #    error Unknown OS
 #  endif
@@ -349,7 +397,7 @@
    SysRes res;
    VG_(memset)(vgbuf, 0, sizeof(*vgbuf));
 
-#  if defined(VGO_linux)  ||  defined(VGO_darwin)
+#  if defined(VGO_linux) || defined(VGO_darwin)
    /* First try with fstat64.  If that doesn't work out, fall back to
       the vanilla version. */
 #  if defined(__NR_fstat64)
@@ -369,7 +417,21 @@
         TRANSLATE_TO_vg_stat(vgbuf, &buf);
      return sr_isError(res) ? (-1) : 0;
    }
-
+#  elif defined(VGO_solaris)
+   { 
+#     if defined(VGP_x86_solaris)
+      struct vki_stat64 buf64;
+      res = VG_(do_syscall4)(__NR_fstatat64, (UWord)fd, 0, (UWord)&buf64, 0);
+#     elif defined(VGP_amd64_solaris)
+      struct vki_stat buf64;
+      res = VG_(do_syscall4)(__NR_fstatat, (UWord)fd, 0, (UWord)&buf64, 0);
+#     else
+#        error "Unknown platform"
+#     endif
+      if (!sr_isError(res))
+         TRANSLATE_TO_vg_stat(vgbuf, &buf64);
+      return sr_isError(res) ? (-1) : 0;
+   }
 #  else
 #    error Unknown OS
 #  endif
@@ -407,13 +469,21 @@
 
 SysRes VG_(dup) ( Int oldfd )
 {
+#  if defined(VGO_linux) || defined(VGO_darwin)
    return VG_(do_syscall1)(__NR_dup, oldfd);
+#  elif defined(VGO_solaris)
+   return VG_(do_syscall3)(__NR_fcntl, oldfd, F_DUPFD, 0);
+#  else
+#    error Unknown OS
+#  endif
 }
 
 SysRes VG_(dup2) ( Int oldfd, Int newfd )
 {
 #  if defined(VGO_linux) || defined(VGO_darwin)
    return VG_(do_syscall2)(__NR_dup2, oldfd, newfd);
+#  elif defined(VGO_solaris)
+   return VG_(do_syscall3)(__NR_fcntl, oldfd, F_DUP2FD, newfd);
 #  else
 #    error Unknown OS
 #  endif
@@ -422,7 +492,7 @@
 /* Returns -1 on error. */
 Int VG_(fcntl) ( Int fd, Int cmd, Addr arg )
 {
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    SysRes res = VG_(do_syscall3)(__NR_fcntl, fd, cmd, arg);
 #  elif defined(VGO_darwin)
    SysRes res = VG_(do_syscall3)(__NR_fcntl_nocancel, fd, cmd, arg);
@@ -437,8 +507,13 @@
 #  if defined(VGP_tilegx_linux)
    SysRes res = VG_(do_syscall3)(__NR_renameat, VKI_AT_FDCWD,
                                  (UWord)old_name, (UWord)new_name);
-#  else
+#  elif defined(VGO_linux) || defined(VGO_darwin)
    SysRes res = VG_(do_syscall2)(__NR_rename, (UWord)old_name, (UWord)new_name);
+#  elif defined(VGO_solaris)
+   SysRes res = VG_(do_syscall4)(__NR_renameat, VKI_AT_FDCWD, (UWord)old_name,
+                                 VKI_AT_FDCWD, (UWord)new_name);
+#  else
+#    error "Unknown OS"
 #  endif
    return sr_isError(res) ? (-1) : 0;
 }
@@ -448,8 +523,13 @@
 #  if defined(VGP_arm64_linux) || defined(VGP_tilegx_linux)
    SysRes res = VG_(do_syscall2)(__NR_unlinkat, VKI_AT_FDCWD,
                                                 (UWord)file_name);
-#  else
+#  elif defined(VGO_linux) || defined(VGO_darwin)
    SysRes res = VG_(do_syscall1)(__NR_unlink, (UWord)file_name);
+#  elif defined(VGO_solaris)
+   SysRes res = VG_(do_syscall3)(__NR_unlinkat, VKI_AT_FDCWD,
+                                 (UWord)file_name, 0);
+#  else
+#    error "Unknown OS"
 #  endif
    return sr_isError(res) ? (-1) : 0;
 }
@@ -469,8 +549,7 @@
 Bool VG_(record_startup_wd) ( void )
 {
    vg_assert(!startup_wd_acquired);
-
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    /* Simple: just ask the kernel */
    SysRes res;
    SizeT szB = 0;
@@ -535,6 +614,19 @@
    res = VG_(do_syscall3)(__NR_poll, (UWord)fds, nfds, timeout);
 #  elif defined(VGO_darwin)
    res = VG_(do_syscall3)(__NR_poll_nocancel, (UWord)fds, nfds, timeout);
+#  elif defined(VGO_solaris)
+   struct vki_timespec ts;
+   struct vki_timespec *tsp;
+
+   if (timeout < 0)
+      tsp = NULL;
+   else {  
+      ts.tv_sec = timeout / 1000;
+      ts.tv_nsec = (timeout % 1000) * 1000000;
+      tsp = &ts;
+   }
+
+   res = VG_(do_syscall4)(__NR_pollsys, (UWord)fds, nfds, (UWord)tsp, 0);
 #  else
 #    error "Unknown OS"
 #  endif
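
For example (values illustrative), a 2500 ms timeout is converted into
ts.tv_sec = 2 and ts.tv_nsec = 500000000 before being handed to pollsys, while
a negative timeout is passed as a NULL timespec pointer, meaning wait
indefinitely.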
@@ -553,18 +645,29 @@
 #  if defined(VGP_arm64_linux) || defined(VGP_tilegx_linux)
    res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD,
                                            (UWord)path, (UWord)buf, bufsiz);
-#  else
+#  elif defined(VGO_linux) || defined(VGO_darwin)
    res = VG_(do_syscall3)(__NR_readlink, (UWord)path, (UWord)buf, bufsiz);
+#  elif defined(VGO_solaris)
+   res = VG_(do_syscall4)(__NR_readlinkat, VKI_AT_FDCWD, (UWord)path,
+                          (UWord)buf, bufsiz);
+#  else
+#    error "Unknown OS"
 #  endif
    return sr_isError(res) ? -1 : sr_Res(res);
 }
 
-#if defined(VGO_linux)
+#if defined(VGO_linux) || defined(VGO_solaris)
 Int VG_(getdents64) (Int fd, struct vki_dirent64 *dirp, UInt count)
 {
    SysRes res;
    /* res = getdents( fd, dirp, count ); */
+#  if defined(VGP_amd64_solaris)
+   /* This silently assumes that dirent64 and dirent on amd64 are the same,
+      which they should always be. */
+   res = VG_(do_syscall3)(__NR_getdents, fd, (UWord)dirp, count);
+#  else
    res = VG_(do_syscall3)(__NR_getdents64, fd, (UWord)dirp, count);
+#  endif
    return sr_isError(res) ? -1 : sr_Res(res);
 }
 #endif
@@ -588,8 +691,13 @@
              | (ixusr ? VKI_X_OK : 0);
 #  if defined(VGP_arm64_linux) || defined(VGP_tilegx_linux)
    SysRes res = VG_(do_syscall3)(__NR_faccessat, VKI_AT_FDCWD, (UWord)path, w);
-#  else
+#  elif defined(VGO_linux) || defined(VGO_darwin)
    SysRes res = VG_(do_syscall2)(__NR_access, (UWord)path, w);
+#  elif defined(VGO_solaris)
+   SysRes res = VG_(do_syscall4)(__NR_faccessat, VKI_AT_FDCWD, (UWord)path,
+                                 w, 0);
+#  else
+#    error "Unknown OS"
 #  endif
    return sr_isError(res) ? 1 : 0;   
 
@@ -738,6 +846,14 @@
    res = VG_(do_syscall5)(__NR_pread_nocancel, fd, (UWord)buf, count, 
                           offset & 0xffffffff, offset >> 32);
    return res;
+#  elif defined(VGP_x86_solaris)
+   vg_assert(sizeof(OffT) == 4);
+   res = VG_(do_syscall4)(__NR_pread, fd, (UWord)buf, count, offset);
+   return res;
+#  elif defined(VGP_amd64_solaris)
+   vg_assert(sizeof(OffT) == 8);
+   res = VG_(do_syscall4)(__NR_pread, fd, (UWord)buf, count, offset);
+   return res;
 #  else
 #    error "Unknown platform"
 #  endif
@@ -872,7 +988,7 @@
 */
 Int VG_(connect_via_socket)( const HChar* str )
 {
-#  if defined(VGO_linux) || defined(VGO_darwin)
+#  if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
    Int sd, res;
    struct vki_sockaddr_in servAddr;
    UInt   ip   = 0;
@@ -992,6 +1108,20 @@
    }
    return sr_isError(res) ? -1 : sr_Res(res);
 
+#  elif defined(VGO_solaris)
+   /* XXX There doesn't seem to be an easy way to convince the send syscall to
+      only return EPIPE instead of raising SIGPIPE. EPIPE is only returned if
+      SM_KERNEL is set on the socket. Without serious hackery it looks like we
+      can't set this flag.
+
+      Should we wrap the send syscall below into sigprocmask calls to block
+      SIGPIPE?
+    */
+   SysRes res;
+   res = VG_(do_syscall5)(__NR_so_socket, domain, type, protocol,
+                          0 /*devpath*/, VKI_SOV_DEFAULT /*version*/);
+   return sr_isError(res) ? -1 : sr_Res(res);
+
 #  else
 #    error "Unknown arch"
 #  endif
@@ -1025,6 +1155,12 @@
                           sockfd, (UWord)serv_addr, addrlen);
    return sr_isError(res) ? -1 : sr_Res(res);
 
+#  elif defined(VGO_solaris)
+   SysRes res;
+   res = VG_(do_syscall4)(__NR_connect, sockfd, (UWord)serv_addr, addrlen,
+                          VKI_SOV_DEFAULT /*version*/);
+   return sr_isError(res) ? -1 : sr_Res(res);
+
 #  else
 #    error "Unknown arch"
 #  endif
@@ -1066,6 +1202,11 @@
    res = VG_(do_syscall3)(__NR_write_nocancel, sd, (UWord)msg, count);
    return sr_isError(res) ? -1 : sr_Res(res);
 
+#  elif defined(VGO_solaris)
+   SysRes res;
+   res = VG_(do_syscall4)(__NR_send, sd, (UWord)msg, count, 0 /*flags*/);
+   return sr_isError(res) ? -1 : sr_Res(res);
+
 #  else
 #    error "Unknown platform"
 #  endif
@@ -1099,6 +1240,12 @@
                            (UWord)sd, (UWord)name, (UWord)namelen );
    return sr_isError(res) ? -1 : sr_Res(res);
 
+#  elif defined(VGO_solaris)
+   SysRes res;
+   res = VG_(do_syscall4)(__NR_getsockname, sd, (UWord)name, (UWord)namelen,
+                          VKI_SOV_DEFAULT /*version*/);
+   return sr_isError(res) ? -1 : sr_Res(res);
+
 #  else
 #    error "Unknown platform"
 #  endif
@@ -1132,6 +1279,12 @@
                            (UWord)sd, (UWord)name, (UWord)namelen );
    return sr_isError(res) ? -1 : sr_Res(res);
 
+#  elif defined(VGO_solaris)
+   SysRes res;
+   res = VG_(do_syscall4)(__NR_getpeername, sd, (UWord)name, (UWord)namelen,
+                          VKI_SOV_DEFAULT /*version*/);
+   return sr_isError(res) ? -1 : sr_Res(res);
+
 #  else
 #    error "Unknown platform"
 #  endif
@@ -1169,6 +1322,12 @@
                            (UWord)optval, (UWord)optlen );
    return sr_isError(res) ? -1 : sr_Res(res);
 
+#  elif defined(VGO_solaris)
+   SysRes res;
+   res = VG_(do_syscall6)(__NR_getsockopt, sd, level, optname, (UWord)optval,
+                          (UWord)optlen, VKI_SOV_DEFAULT /*version*/);
+   return sr_isError(res) ? -1 : sr_Res(res);
+
 #  else
 #    error "Unknown platform"
 #  endif
@@ -1207,6 +1366,14 @@
                            (UWord)optval, (UWord)optlen );
    return sr_isError(res) ? -1 : sr_Res(res);
 
+#  elif defined(VGO_solaris)
+   SysRes res;
+   res = VG_(do_syscall6)( __NR_setsockopt,
+                           (UWord)sd, (UWord)level, (UWord)optname,
+                           (UWord)optval, (UWord)optlen,
+                           VKI_SOV_DEFAULT /*version*/ );
+   return sr_isError(res) ? -1 : sr_Res(res);
+
 #  else
 #    error "Unknown platform"
 #  endif
diff --git a/coregrind/m_libcprint.c b/coregrind/m_libcprint.c
index 114467f..0ebc00c 100644
--- a/coregrind/m_libcprint.c
+++ b/coregrind/m_libcprint.c
@@ -660,7 +660,7 @@
    return buf;
 }
 
-#elif defined(VGO_darwin)
+#elif defined(VGO_darwin) || defined(VGO_solaris)
 
 const HChar *VG_(sr_as_string) ( SysRes sr )
 {
diff --git a/coregrind/m_libcproc.c b/coregrind/m_libcproc.c
index 321fdf3..6e6f99b 100644
--- a/coregrind/m_libcproc.c
+++ b/coregrind/m_libcproc.c
@@ -34,6 +34,7 @@
 #include "pub_core_vkiscnums.h"
 #include "pub_core_libcbase.h"
 #include "pub_core_libcassert.h"
+#include "pub_core_libcfile.h"
 #include "pub_core_libcprint.h"
 #include "pub_core_libcproc.h"
 #include "pub_core_libcsignal.h"
@@ -66,7 +67,7 @@
 const HChar *VG_(libdir) = VG_LIBDIR;
 
 const HChar *VG_(LD_PRELOAD_var_name) =
-#if defined(VGO_linux)
+#if defined(VGO_linux) || defined(VGO_solaris)
    "LD_PRELOAD";
 #elif defined(VGO_darwin)
    "DYLD_INSERT_LIBRARIES";
@@ -90,7 +91,9 @@
    return NULL;
 }
 
-void  VG_(env_unsetenv) ( HChar **env, const HChar *varname )
+/* If free_fn is not NULL, it is called on each removed environment variable. */
+void  VG_(env_unsetenv) ( HChar **env, const HChar *varname,
+                          void (*free_fn) (void *) )
 {
    HChar **from, **to;
    vg_assert(env);
@@ -102,6 +105,8 @@
       if (!(VG_(strncmp)(varname, *from, len) == 0 && (*from)[len] == '=')) {
 	 *to = *from;
 	 to++;
+      } else if (free_fn != NULL) {
+         free_fn(*from);
       }
    }
    *to = *from;
@@ -216,9 +221,14 @@
 }
 
 
-// Removes all the Valgrind-added stuff from the passed environment.  Used
-// when starting child processes, so they don't see that added stuff.
-void VG_(env_remove_valgrind_env_stuff)(HChar** envp)
+/* Removes all the Valgrind-added stuff from the passed environment.  Used
+   when starting child processes, so they don't see that added stuff.
+   If the ro_strings option is set to True then all strings referenced by envp
+   are considered read-only, which means they will be duplicated before they
+   are modified.
+   If free_fn is not NULL, it is called on "unset" environment variables. */
+void VG_(env_remove_valgrind_env_stuff)(HChar** envp, Bool ro_strings,
+                                        void (*free_fn) (void *) )
 {
 
 #if defined(VGO_darwin)
@@ -241,15 +251,18 @@
    // - DYLD_INSERT_LIBRARIES and DYLD_SHARED_REGION are Darwin-only
    for (i = 0; envp[i] != NULL; i++) {
       if (VG_(strncmp)(envp[i], "LD_PRELOAD=", 11) == 0) {
-         envp[i] = VG_(strdup)("libcproc.erves.1", envp[i]);
+         if (ro_strings)
+            envp[i] = VG_(strdup)("libcproc.erves.1", envp[i]);
          ld_preload_str = &envp[i][11];
       }
       if (VG_(strncmp)(envp[i], "LD_LIBRARY_PATH=", 16) == 0) {
-         envp[i] = VG_(strdup)("libcproc.erves.2", envp[i]);
+         if (ro_strings)
+            envp[i] = VG_(strdup)("libcproc.erves.2", envp[i]);
          ld_library_path_str = &envp[i][16];
       }
       if (VG_(strncmp)(envp[i], "DYLD_INSERT_LIBRARIES=", 22) == 0) {
-         envp[i] = VG_(strdup)("libcproc.erves.3", envp[i]);
+         if (ro_strings)
+            envp[i] = VG_(strdup)("libcproc.erves.3", envp[i]);
          dyld_insert_libraries_str = &envp[i][22];
       }
    }
@@ -264,16 +277,79 @@
    mash_colon_env(ld_library_path_str, buf);
 
    // Remove VALGRIND_LAUNCHER variable.
-   VG_(env_unsetenv)(envp, VALGRIND_LAUNCHER);
+   VG_(env_unsetenv)(envp, VALGRIND_LAUNCHER, free_fn);
 
    // Remove DYLD_SHARED_REGION variable.
-   VG_(env_unsetenv)(envp, "DYLD_SHARED_REGION");
+   VG_(env_unsetenv)(envp, "DYLD_SHARED_REGION", free_fn);
 
    // XXX if variable becomes empty, remove it completely?
 
    VG_(free)(buf);
 }
 
+/* Resolves the filename of VG_(cl_exec_fd) and copies its basename to the
+   buffer.  Buffer must not be NULL and buf_size must be at least 1.
+   If the buffer is not large enough, it is guaranteed to be NUL-terminated
+   only when 'terminate_with_NUL == True'. */
+void VG_(client_fname)(HChar *buffer, SizeT buf_size, Bool terminate_with_NUL)
+{
+   vg_assert(buffer != NULL);
+   vg_assert(buf_size >= 1);
+
+   const HChar *name;
+   if (VG_(resolve_filename)(VG_(cl_exec_fd), &name)) {
+      const HChar *n = name + VG_(strlen)(name) - 1;
+
+      while (n > name && *n != '/')
+         n--;
+      if (n != name)
+         n++;
+
+      VG_(strncpy)(buffer, n, buf_size);
+      if (terminate_with_NUL)
+         buffer[buf_size - 1] = '\0';
+   } else {
+      buffer[0] = '\0';
+   }
+}
+
+static Bool add_string(HChar *buffer, SizeT *buf_size, const HChar *string)
+{
+   SizeT len = VG_(strlen)(string);
+   VG_(strncat)(buffer, string, *buf_size);
+   if (len >= *buf_size - 1) {
+      *buf_size = 0;
+      return False;
+   } else {
+      *buf_size -= len;
+      return True;
+   }
+}
+
+/* Concatenates the client exename and its command line arguments into
+   the buffer.  The buffer must not be NULL and buf_size must be
+   at least 1.  The buffer is always terminated with '\0'. */
+void VG_(client_cmd_and_args)(HChar *buffer, SizeT buf_size)
+{
+   vg_assert(buffer != NULL);
+   vg_assert(buf_size >= 1);
+
+   buffer[0] = '\0';
+
+   if (add_string(buffer, &buf_size, VG_(args_the_exename)) == False)
+      return;
+
+   Int i;
+   for (i = 0; i < VG_(sizeXA)(VG_(args_for_client)); i++) {
+      if (add_string(buffer, &buf_size, " ") == False)
+         return;
+
+      HChar *arg = *(HChar **) VG_(indexXA)(VG_(args_for_client), i);
+      if (add_string(buffer, &buf_size, arg) == False)
+         return;
+   }
+}
+
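A minimal usage sketch of the two helpers above (illustrative only; the buffer sizes are arbitrary):

   HChar fname[64];
   HChar cmdline[1024];

   /* Basename of the client executable; NUL-terminated because of 'True'. */
   VG_(client_fname)(fname, sizeof fname, True /*terminate_with_NUL*/);

   /* "exename arg1 arg2 ...", truncated if needed, always NUL-terminated. */
   VG_(client_cmd_and_args)(cmdline, sizeof cmdline);

   VG_(debugLog)(1, "libcproc", "client: %s (%s)\n", cmdline, fname);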
 /* ---------------------------------------------------------------------
    Various important syscall wrappers
    ------------------------------------------------------------------ */
@@ -288,6 +364,63 @@
    SysRes res = VG_(do_syscall4)(__NR_wait4_nocancel,
                                  pid, (UWord)status, options, 0);
    return sr_isError(res) ? -1 : sr_Res(res);
+#  elif defined(VGO_solaris)
+   SysRes res;
+   vki_idtype_t idtype;
+   vki_id_t id;
+   vki_siginfo_t info;
+
+   /* Map the pid argument onto waitid()-style (idtype, id) and rebuild the
+      classic wait() status word from the returned siginfo. */
+
+   if (pid > 0) {
+      idtype = VKI_P_PID;
+      id = pid;
+   }
+   else if (pid < -1) {
+      idtype = VKI_P_PGID;
+      id = -pid;
+   }
+   else if (pid == -1) {
+      idtype = VKI_P_ALL;
+      id = 0;
+   }
+   else {
+      idtype = VKI_P_PGID;
+      res = VG_(do_syscall0)(__NR_getpid);
+      id = sr_ResHI(res);
+   }
+
+   options |= VKI_WEXITED | VKI_WTRAPPED;
+
+   res = VG_(do_syscall4)(__NR_waitsys, idtype, id, (UWord)&info, options);
+   if (sr_isError(res))
+      return -1;
+
+   if (status) {
+      Int s = info.si_status & 0xff;
+
+      switch (info.si_code) {
+         case VKI_CLD_EXITED:
+            s <<= 8;
+            break;
+         case VKI_CLD_DUMPED:
+            s |= VKI_WCOREFLG;
+            break;
+         case VKI_CLD_KILLED:
+            break;
+         case VKI_CLD_TRAPPED:
+         case VKI_CLD_STOPPED:
+            s <<= 8;
+            s |= VKI_WSTOPFLG;
+            break;
+         case VKI_CLD_CONTINUED:
+            s = VKI_WCONTFLG;
+            break;
+      }
+      *status = s;
+   }
+
+   return info.si_pid;
 #  else
 #    error Unknown OS
 #  endif
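The status word assembled in the Solaris branch above follows the classic wait() encoding, so callers can keep decoding it the usual way. A standalone sketch of that decoding (the literal values 0x80, 0x7f and 0xffff are only stand-ins for VKI_WCOREFLG, VKI_WSTOPFLG and VKI_WCONTFLG):

   #include <stdio.h>

   /* Sketch only: decode a status word built like the Solaris branch above. */
   static void describe_status(int status)
   {
      if (status == 0xffff)                 /* continued (WCONTFLG) */
         printf("continued\n");
      else if ((status & 0xff) == 0x7f)     /* stopped/trapped (WSTOPFLG) */
         printf("stopped by signal %d\n", status >> 8);
      else if ((status & 0x7f) == 0)        /* normal exit */
         printf("exited with code %d\n", (status >> 8) & 0xff);
      else                                  /* killed by a signal */
         printf("killed by signal %d%s\n", status & 0x7f,
                (status & 0x80) ? " (core dumped)" : "");
   }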
@@ -320,7 +453,7 @@
    return newenv;
 }
 
-void VG_(execv) ( const HChar* filename, HChar** argv )
+void VG_(execv) ( const HChar* filename, const HChar** argv )
 {
    HChar** envp;
    SysRes res;
@@ -329,7 +462,7 @@
    VG_(setrlimit)(VKI_RLIMIT_DATA, &VG_(client_rlimit_data));
 
    envp = VG_(env_clone)(VG_(client_envp));
-   VG_(env_remove_valgrind_env_stuff)( envp );
+   VG_(env_remove_valgrind_env_stuff)( envp, True /*ro_strings*/, NULL );
 
    res = VG_(do_syscall3)(__NR_execve,
                           (UWord)filename, (UWord)argv, (UWord)envp);
@@ -337,6 +470,93 @@
    VG_(printf)("EXEC failed, errno = %lld\n", (Long)sr_Err(res));
 }
 
+/* Spawns a new child. Uses either the spawn syscall or a fork+execv combination. */
+Int VG_(spawn) ( const HChar *filename, const HChar **argv )
+{
+   vg_assert(filename != NULL);
+   vg_assert(argv != NULL);
+
+#  if defined(VGO_solaris) && defined(SOLARIS_SPAWN_SYSCALL)
+   HChar **envp = VG_(env_clone)(VG_(client_envp));
+   for (HChar **p = envp; *p != NULL; p++) {
+      *p = VG_(strdup)("libcproc.s.1", *p);
+   }
+   VG_(env_remove_valgrind_env_stuff)(envp, /* ro_strings */ False, VG_(free));
+
+   /* Now combine argv and envp into argenv. */
+   SizeT argenv_size = 1 + 1;
+   for (const HChar **p = argv; *p != NULL; p++) {
+      argenv_size += VG_(strlen)(*p) + 2;
+   }
+   for (HChar **p = envp; *p != NULL; p++) {
+      argenv_size += VG_(strlen)(*p) + 2;
+   }
+
+   HChar *argenv = VG_(malloc)("libcproc.s.2", argenv_size);
+   HChar *current = argenv;
+#  define COPY_CHAR_TO_ARGENV(dst, character)  \
+      do {                                     \
+         *(dst) = character;                   \
+         (dst) += 1;                           \
+      } while (0)
+#  define COPY_STRING_TO_ARGENV(dst, src)        \
+      do {                                       \
+         COPY_CHAR_TO_ARGENV(dst, '\1');         \
+         SizeT src_len = VG_(strlen)((src)) + 1; \
+         VG_(memcpy)((dst), (src), src_len);     \
+         (dst) += src_len;                       \
+      } while (0)
+
+   for (const HChar **p = argv; *p != NULL; p++) {
+      COPY_STRING_TO_ARGENV(current, *p);
+   }
+   COPY_CHAR_TO_ARGENV(current, '\0');
+   for (HChar **p = envp; *p != NULL; p++) {
+      COPY_STRING_TO_ARGENV(current, *p);
+   }
+   COPY_CHAR_TO_ARGENV(current, '\0');
+   vg_assert(current == argenv + argenv_size);
+#  undef COPY_CHAR_TO_ARGENV
+#  undef COPY_STRING_TO_ARGENV
+
+   /* HACK: Temporarily restore the DATA rlimit for spawned child. */
+   VG_(setrlimit)(VKI_RLIMIT_DATA, &VG_(client_rlimit_data));
+
+   SysRes res = VG_(do_syscall5)(__NR_spawn, (UWord) filename, (UWord) NULL, 0,
+                                 (UWord) argenv, argenv_size);
+
+   /* Restore DATA rlimit back to its previous value set in m_main.c. */
+   struct vki_rlimit zero = { 0, 0 };
+   zero.rlim_max = VG_(client_rlimit_data).rlim_max;
+   VG_(setrlimit)(VKI_RLIMIT_DATA, &zero);
+
+   VG_(free)(argenv);
+   for (HChar **p = envp; *p != NULL; p++) {
+      VG_(free)(*p);
+   }
+   VG_(free)(envp);
+
+   if (sr_isError(res))
+      return -1;
+   return sr_Res(res);
+
+#  else
+
+   Int pid = VG_(fork)();
+   if (pid < 0)
+      return -1;
+   if (pid == 0) {
+      /* child */
+      VG_(execv)(argv[0], argv);
+
+      /* If we're still alive here, execv failed. */
+      VG_(exit)(1);
+   } else {
+      return pid;
+   }
+#  endif /* VGO_solaris && SOLARIS_SPAWN_SYSCALL */
+}
+
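The argenv sizing above follows directly from the layout built by the two COPY_* macros: each string is stored as a '\1' tag, its characters and a terminating '\0', and each of the two lists (arguments, environment) ends with one extra '\0'. A self-contained sketch of that accounting (illustrative only; the real consumer of this layout is the Solaris spawn syscall):

   #include <stddef.h>
   #include <string.h>

   /* Sketch only: size the buffer the same way VG_(spawn) does above. */
   static size_t argenv_size(const char **argv, const char **envp)
   {
      size_t size = 1 + 1;            /* one '\0' after argv, one after envp */
      for (const char **p = argv; *p != NULL; p++)
         size += strlen(*p) + 2;      /* '\1' tag + characters + '\0' */
      for (const char **p = envp; *p != NULL; p++)
         size += strlen(*p) + 2;
      return size;
   }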
 /* Return -1 if error, else 0.  NOTE does not indicate return code of
    child! */
 Int VG_(system) ( const HChar* cmd )
@@ -344,47 +564,42 @@
    Int pid;
    if (cmd == NULL)
       return 1;
-   pid = VG_(fork)();
+
+   const HChar *argv[4] = { "/bin/sh", "-c", cmd, 0 };
+   pid = VG_(spawn)(argv[0], argv);
    if (pid < 0)
       return -1;
-   if (pid == 0) {
-      /* child */
-      const HChar* argv[4] = { "/bin/sh", "-c", cmd, 0 };
-      VG_(execv)(argv[0], CONST_CAST(HChar **,argv));
 
-      /* If we're still alive here, execv failed. */
-      VG_(exit)(1);
-   } else {
-      /* parent */
-      /* We have to set SIGCHLD to its default behaviour in order that
-         VG_(waitpid) works (at least on AIX).  According to the Linux
-         man page for waitpid:
+   vg_assert(pid > 0);
+   /* parent */
+   /* We have to set SIGCHLD to its default behaviour in order that
+      VG_(waitpid) works (at least on AIX).  According to the Linux
+      man page for waitpid:
 
-         POSIX.1-2001 specifies that if the disposition of SIGCHLD is
-         set to SIG_IGN or the SA_NOCLDWAIT flag is set for SIGCHLD
-         (see sigaction(2)), then children that terminate do not
-         become zombies and a call to wait() or waitpid() will block
-         until all children have terminated, and then fail with errno
-         set to ECHILD.  (The original POSIX standard left the
-         behaviour of setting SIGCHLD to SIG_IGN unspecified.)
-      */
-      Int ir, zzz;
-      vki_sigaction_toK_t sa, sa2;
-      vki_sigaction_fromK_t saved_sa;
-      VG_(memset)( &sa, 0, sizeof(sa) );
-      VG_(sigemptyset)(&sa.sa_mask);
-      sa.ksa_handler = VKI_SIG_DFL;
-      sa.sa_flags    = 0;
-      ir = VG_(sigaction)(VKI_SIGCHLD, &sa, &saved_sa);
-      vg_assert(ir == 0);
+      POSIX.1-2001 specifies that if the disposition of SIGCHLD is
+      set to SIG_IGN or the SA_NOCLDWAIT flag is set for SIGCHLD
+      (see sigaction(2)), then children that terminate do not
+      become zombies and a call to wait() or waitpid() will block
+      until all children have terminated, and then fail with errno
+      set to ECHILD.  (The original POSIX standard left the
+      behaviour of setting SIGCHLD to SIG_IGN unspecified.)
+   */
+   Int ir, zzz;
+   vki_sigaction_toK_t sa, sa2;
+   vki_sigaction_fromK_t saved_sa;
+   VG_(memset)( &sa, 0, sizeof(sa) );
+   VG_(sigemptyset)(&sa.sa_mask);
+   sa.ksa_handler = VKI_SIG_DFL;
+   sa.sa_flags    = 0;
+   ir = VG_(sigaction)(VKI_SIGCHLD, &sa, &saved_sa);
+   vg_assert(ir == 0);
 
-      zzz = VG_(waitpid)(pid, NULL, 0);
+   zzz = VG_(waitpid)(pid, NULL, 0);
 
-      VG_(convert_sigaction_fromK_to_toK)( &saved_sa, &sa2 );
-      ir = VG_(sigaction)(VKI_SIGCHLD, &sa2, NULL);
-      vg_assert(ir == 0);
-      return zzz == -1 ? -1 : 0;
-   }
+   VG_(convert_sigaction_fromK_to_toK)( &saved_sa, &sa2 );
+   ir = VG_(sigaction)(VKI_SIGCHLD, &sa2, NULL);
+   vg_assert(ir == 0);
+   return zzz == -1 ? -1 : 0;
 }
 
 Int VG_(sysctl)(Int *name, UInt namelen, void *oldp, SizeT *oldlenp, void *newp, SizeT newlen)
@@ -493,6 +708,10 @@
    // Use Mach thread ports for lwpid instead.
    return mach_thread_self();
 
+#  elif defined(VGO_solaris)
+   SysRes res = VG_(do_syscall0)(__NR_lwp_self);
+   return sr_Res(res);
+
 #  else
 #    error "Unknown OS"
 #  endif
@@ -508,36 +727,69 @@
 Int VG_(getpgrp) ( void )
 {
    /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
+#  if defined(VGO_linux) || defined(VGO_darwin)
    return sr_Res( VG_(do_syscall0)(__NR_getpgrp) );
+#  elif defined(VGO_solaris)
+   /* Uses the shared pgrpsys syscall, 0 for the getpgrp variant. */
+   return sr_Res( VG_(do_syscall1)(__NR_pgrpsys, 0) );
+#  else
+#    error Unknown OS
+#  endif
 }
 
 Int VG_(getppid) ( void )
 {
    /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
+#  if defined(VGO_linux) || defined(VGO_darwin)
    return sr_Res( VG_(do_syscall0)(__NR_getppid) );
+#  elif defined(VGO_solaris)
+   /* Uses the shared getpid/getppid syscall, val2 contains the parent pid. */
+   return sr_ResHI( VG_(do_syscall0)(__NR_getpid) );
+#  else
+#    error Unknown OS
+#  endif
 }
 
 Int VG_(geteuid) ( void )
 {
    /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
-#  if defined(__NR_geteuid32)
-   // We use the 32-bit version if it's supported.  Otherwise, IDs greater
-   // than 65536 cause problems, as bug #151209 showed.
-   return sr_Res( VG_(do_syscall0)(__NR_geteuid32) );
+#  if defined(VGO_linux) || defined(VGO_darwin)
+   {
+#     if defined(__NR_geteuid32)
+      // We use the 32-bit version if it's supported.  Otherwise, IDs greater
+      // than 65536 cause problems, as bug #151209 showed.
+      return sr_Res( VG_(do_syscall0)(__NR_geteuid32) );
+#     else
+      return sr_Res( VG_(do_syscall0)(__NR_geteuid) );
+#     endif
+   }
+#  elif defined(VGO_solaris)
+   /* Uses the shared getuid/geteuid syscall, val2 contains the effective
+      uid. */
+   return sr_ResHI( VG_(do_syscall0)(__NR_getuid) );
 #  else
-   return sr_Res( VG_(do_syscall0)(__NR_geteuid) );
+#    error Unknown OS
 #  endif
 }
 
 Int VG_(getegid) ( void )
 {
+#  if defined(VGO_linux) || defined(VGO_darwin)
    /* ASSUMES SYSCALL ALWAYS SUCCEEDS */
-#  if defined(__NR_getegid32)
+#    if defined(__NR_getegid32)
    // We use the 32-bit version if it's supported.  Otherwise, IDs greater
    // than 65536 cause problems, as bug #151209 showed.
    return sr_Res( VG_(do_syscall0)(__NR_getegid32) );
-#  else
+#    else
    return sr_Res( VG_(do_syscall0)(__NR_getegid) );
+#    endif
+
+#  elif defined(VGO_solaris)
+   /* Uses the shared getgid/getegid syscall, val2 contains the effective
+      gid. */
+   return sr_ResHI( VG_(do_syscall0)(__NR_getgid) );
+#  else
+#    error Unknown OS
 #  endif
 }
 
@@ -568,7 +820,8 @@
 #  elif defined(VGP_amd64_linux) || defined(VGP_arm_linux) \
         || defined(VGP_ppc64be_linux) || defined(VGP_ppc64le_linux)  \
         || defined(VGO_darwin) || defined(VGP_s390x_linux)    \
-        || defined(VGP_mips32_linux) || defined(VGP_arm64_linux)
+        || defined(VGP_mips32_linux) || defined(VGP_arm64_linux) \
+        || defined(VGO_solaris)
    SysRes sres;
    sres = VG_(do_syscall2)(__NR_getgroups, size, (Addr)list);
    if (sr_isError(sres))
@@ -587,7 +840,17 @@
 Int VG_(ptrace) ( Int request, Int pid, void *addr, void *data )
 {
    SysRes res;
+#  if defined(VGO_linux) || defined(VGO_darwin)
    res = VG_(do_syscall4)(__NR_ptrace, request, pid, (UWord)addr, (UWord)data);
+#  elif defined(VGO_solaris)
+   /* There is no ptrace syscall on Solaris.  Such requests have to be
+      implemented via the /proc interface.  Callers of VG_(ptrace) must
+      ensure that this function is never reached on Solaris, i.e. they have
+      to provide dedicated Solaris code for whatever feature they implement. */
+   I_die_here;
+#  else
+#    error Unknown OS
+#  endif
    if (sr_isError(res))
       return -1;
    return sr_Res(res);
@@ -625,6 +888,24 @@
    }
    return sr_Res(res);
 
+#  elif defined(VGO_solaris)
+   /* Using fork() on Solaris is best avoided: Solaris does not overcommit
+      memory, so fork() can fail if there is not enough memory to duplicate
+      the current process.
+      Prefer VG_(spawn)() over VG_(fork)() + VG_(execv)(). */
+   SysRes res;
+   res = VG_(do_syscall2)(__NR_forksys, 0 /*subcode (fork)*/, 0 /*flags*/);
+   if (sr_isError(res))
+      return -1;
+   /* On success:
+        val  = the pid of the child in the parent, the pid of the parent in
+               the child,
+        val2 = 0 in the parent process, 1 in the child process. */
+   if (sr_ResHI(res) != 0) {
+      return 0;
+   }
+   return sr_Res(res);
+
 #  else
 #    error "Unknown OS"
 #  endif
@@ -640,7 +921,7 @@
    static ULong base = 0;
    ULong  now;
 
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    { SysRes res;
      struct vki_timespec ts_now;
      res = VG_(do_syscall2)(__NR_clock_gettime, VKI_CLOCK_MONOTONIC,
@@ -649,6 +930,8 @@
         now = ts_now.tv_sec * 1000000ULL + ts_now.tv_nsec / 1000;
      } else {
        struct vki_timeval tv_now;
+       /* Note: On Solaris, this syscall takes only one parameter but the
+          extra dummy one does not cause any harm. */
        res = VG_(do_syscall2)(__NR_gettimeofday, (UWord)&tv_now, (UWord)NULL);
        vg_assert(! sr_isError(res));
        now = tv_now.tv_sec * 1000000ULL + tv_now.tv_usec;
diff --git a/coregrind/m_libcsignal.c b/coregrind/m_libcsignal.c
index a380f05..b84bc08 100644
--- a/coregrind/m_libcsignal.c
+++ b/coregrind/m_libcsignal.c
@@ -37,6 +37,11 @@
 #include "pub_core_syscall.h"
 #include "pub_core_libcsignal.h"    /* self */
 
+#if !defined(VGO_solaris)
+#   define _VKI_MAXSIG (_VKI_NSIG - 1)
+#endif
+STATIC_ASSERT((_VKI_MAXSIG % _VKI_NSIG_BPW) != 0);
+
 /* IMPORTANT: on Darwin it is essential to use the _nocancel versions
    of syscalls rather than the vanilla version, if a _nocancel version
    is available.  See docs/internals/Darwin-notes.txt for the reason
@@ -49,6 +54,13 @@
    64-bits.  And which they are it doesn't necessarily follow from the
    host word size. */
 
+/* Functions VG_(isemptysigset) and VG_(isfullsigset) check only the bits
+   that represent valid signals (i.e. signals <= _VKI_MAXSIG).  The same
+   applies to the comparison in VG_(iseqsigset).  This is important because
+   when a signal set is received from the operating system, the bits which
+   represent signals > _VKI_MAXSIG can hold values Valgrind does not expect.
+   In particular, the Solaris kernel clears these bits. */
+
 Int VG_(sigfillset)( vki_sigset_t* set )
 {
    Int i;
@@ -73,8 +85,18 @@
 {
    Int i;
    vg_assert(set != NULL);
-   for (i = 0; i < _VKI_NSIG_WORDS; i++)
-      if (set->sig[i] != 0) return False;
+   for (i = 0; i < _VKI_NSIG_WORDS; i++) {
+      if (_VKI_NSIG_BPW * (i + 1) <= (_VKI_MAXSIG + 1)) {
+         /* Full word check. */
+         if (set->sig[i] != 0) return False;
+      }
+      else {
+         /* Partial word check. */
+         ULong mask = (1UL << (_VKI_MAXSIG % _VKI_NSIG_BPW)) - 1;
+         if ((set->sig[i] & mask) != 0) return False;
+         break;
+      }
+   }
    return True;
 }
 
@@ -82,8 +104,18 @@
 {
    Int i;
    vg_assert(set != NULL);
-   for (i = 0; i < _VKI_NSIG_WORDS; i++)
-      if (set->sig[i] != ~0) return False;
+   for (i = 0; i < _VKI_NSIG_WORDS; i++) {
+      if (_VKI_NSIG_BPW * (i + 1) <= (_VKI_MAXSIG + 1)) {
+         /* Full word check. */
+         if (set->sig[i] != ~0) return False;
+      }
+      else {
+         /* Partial word check. */
+         ULong mask = (1UL << (_VKI_MAXSIG % _VKI_NSIG_BPW)) - 1;
+         if ((set->sig[i] & mask) != mask) return False;
+         break;
+      }
+   }
    return True;
 }
 
@@ -91,8 +123,18 @@
 {
    Int i;
    vg_assert(set1 != NULL && set2 != NULL);
-   for (i = 0; i < _VKI_NSIG_WORDS; i++)
-      if (set1->sig[i] != set2->sig[i]) return False;
+   for (i = 0; i < _VKI_NSIG_WORDS; i++) {
+      if (_VKI_NSIG_BPW * (i + 1) <= (_VKI_MAXSIG + 1)) {
+         /* Full word comparison. */
+         if (set1->sig[i] != set2->sig[i]) return False;
+      }
+      else {
+         /* Partial word comparison. */
+         ULong mask = (1UL << (_VKI_MAXSIG % _VKI_NSIG_BPW)) - 1;
+         if ((set1->sig[i] & mask) != (set2->sig[i] & mask)) return False;
+         break;
+      }
+   }
    return True;
 }
 
@@ -174,7 +216,7 @@
 */
 Int VG_(sigprocmask)( Int how, const vki_sigset_t* set, vki_sigset_t* oldset)
 {
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
 #  if defined(__NR_rt_sigprocmask)
    SysRes res = VG_(do_syscall4)(__NR_rt_sigprocmask, 
                                  how, (UWord)set, (UWord)oldset, 
@@ -272,6 +314,12 @@
    }
    return sr_isError(res) ? -1 : 0;
 
+#  elif defined(VGO_solaris)
+   /* vki_sigaction_toK_t and vki_sigaction_fromK_t are identical types. */
+   SysRes res = VG_(do_syscall3)(__NR_sigaction,
+                                 signum, (UWord)act, (UWord)oldact);
+   return sr_isError(res) ? -1 : 0;
+
 #  else
 #    error "Unsupported OS"
 #  endif
@@ -283,7 +331,7 @@
 VG_(convert_sigaction_fromK_to_toK)( const vki_sigaction_fromK_t* fromK,
                                      /*OUT*/vki_sigaction_toK_t* toK )
 {
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    *toK = *fromK;
 #  elif defined(VGO_darwin)
    toK->ksa_handler = fromK->ksa_handler;
@@ -298,7 +346,7 @@
 
 Int VG_(kill)( Int pid, Int signo )
 {
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    SysRes res = VG_(do_syscall2)(__NR_kill, pid, signo);
 #  elif defined(VGO_darwin)
    SysRes res = VG_(do_syscall3)(__NR_kill,
@@ -324,6 +372,21 @@
    res = VG_(do_syscall2)(__NR___pthread_kill, lwpid, signo);
    return sr_isError(res) ? -1 : 0;
 
+#  elif defined(VGO_solaris)
+   SysRes res;
+#     if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
+#        if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL_TAKES_PID)
+            res = VG_(do_syscall6)(__NR_lwp_sigqueue, 0, lwpid, signo,
+                                   0, VKI_SI_LWP, 0);
+#        else
+            res = VG_(do_syscall5)(__NR_lwp_sigqueue, lwpid, signo,
+                                   0, VKI_SI_LWP, 0);
+#        endif
+#     else
+         res = VG_(do_syscall2)(__NR_lwp_kill, lwpid, signo);
+#     endif
+   return sr_isError(res) ? -1 : 0;
+
 #  else
 #    error "Unsupported plat"
 #  endif
@@ -477,6 +540,16 @@
   return i;
 }
 
+#elif defined(VGO_solaris)
+Int VG_(sigtimedwait_zero)( const vki_sigset_t *set, vki_siginfo_t *info )
+{
+   /* Trivial as on Linux. */
+   static const struct vki_timespec zero = { 0, 0 };
+   SysRes res = VG_(do_syscall3)(__NR_sigtimedwait, (UWord)set, (UWord)info,
+                                 (UWord)&zero);
+   return sr_isError(res) ? -1 : sr_Res(res);
+}
+
 #else
 #  error "Unknown OS"
 #endif
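A hedged usage sketch of the Solaris VG_(sigtimedwait_zero) added above (illustrative only):

   /* Sketch only: poll, without blocking, whether a SIGCHLD is pending. */
   vki_sigset_t  set;
   vki_siginfo_t info;
   VG_(sigemptyset)(&set);
   VG_(sigaddset)(&set, VKI_SIGCHLD);
   Int sig = VG_(sigtimedwait_zero)(&set, &info);
   if (sig > 0) {
      /* Signal 'sig' was pending and has now been consumed;
         'info' describes it. */
   }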
diff --git a/coregrind/m_machine.c b/coregrind/m_machine.c
index e7b826f..87473c6 100644
--- a/coregrind/m_machine.c
+++ b/coregrind/m_machine.c
@@ -1840,7 +1840,8 @@
       || defined(VGP_ppc32_linux) || defined(VGP_ppc64le_linux) \
       || defined(VGP_s390x_linux) || defined(VGP_mips32_linux) \
       || defined(VGP_mips64_linux) || defined(VGP_arm64_linux) \
-      || defined(VGP_tilegx_linux)
+      || defined(VGP_tilegx_linux) || defined(VGP_x86_solaris) \
+      || defined(VGP_amd64_solaris)
    return f;
 #  elif defined(VGP_ppc64be_linux)
    /* ppc64-linux uses the AIX scheme, in which f is a pointer to a
diff --git a/coregrind/m_main.c b/coregrind/m_main.c
index 39c03ab..9b491e5 100644
--- a/coregrind/m_main.c
+++ b/coregrind/m_main.c
@@ -181,7 +181,7 @@
 "    --run-libc-freeres=no|yes free up glibc memory at exit on Linux? [yes]\n"
 "    --sim-hints=hint1,hint2,...  activate unusual sim behaviours [none] \n"
 "         where hint is one of:\n"
-"           lax-ioctls fuse-compatible enable-outer\n"
+"           lax-ioctls lax-doors fuse-compatible enable-outer\n"
 "           no-inner-prefix no-nptl-pthread-stackcache none\n"
 "    --fair-sched=no|yes|try   schedule threads fairly on multicore systems [no]\n"
 "    --kernel-variant=variant1,variant2,...\n"
@@ -410,7 +410,7 @@
       // running in an outer, to have "no-inner-prefix" enabled
       // as early as possible.
       else if VG_USETX_CLO (str, "--sim-hints",
-                            "lax-ioctls,fuse-compatible,"
+                            "lax-ioctls,lax-doors,fuse-compatible,"
                             "enable-outer,no-inner-prefix,"
                             "no-nptl-pthread-stackcache",
                             VG_(clo_sim_hints)) {}
@@ -1443,7 +1443,7 @@
       VG_(umsg)("\n");
 
    if (VG_(clo_verbosity) > 1) {
-# if !defined(VGO_darwin)
+# if defined(VGO_linux)
       SysRes fd;
 # endif
       VexArch vex_arch;
@@ -1457,7 +1457,7 @@
                      * (HChar**) VG_(indexXA)( VG_(args_for_valgrind), i ));
       }
 
-# if !defined(VGO_darwin)
+# if defined(VGO_linux)
       VG_(message)(Vg_DebugMsg, "Contents of /proc/version:\n");
       fd = VG_(open) ( "/proc/version", VKI_O_RDONLY, 0 );
       if (sr_isError(fd)) {
@@ -1479,7 +1479,7 @@
          VG_(message)(Vg_DebugMsg, "\n");
          VG_(close)(fdno);
       }
-# else
+# elif defined(VGO_darwin)
       VG_(message)(Vg_DebugMsg, "Output from sysctl({CTL_KERN,KERN_VERSION}):\n");
       /* Note: preferable to use sysctlbyname("kern.version", kernelVersion, &len, NULL, 0)
          however that syscall is OS X 10.10+ only. */
@@ -1490,6 +1490,20 @@
       VG_(sysctl)(mib, sizeof(mib)/sizeof(Int), kernelVersion, &len, NULL, 0);
       VG_(message)(Vg_DebugMsg, "  %s\n", kernelVersion);
       VG_(free)( kernelVersion );
+# elif defined(VGO_solaris)
+      /* There is no /proc/version file on Solaris, so we obtain some system
+         information via the uname(2) syscall instead. */
+      {
+         struct vki_utsname uts;
+
+         VG_(message)(Vg_DebugMsg, "System information:\n");
+         SysRes res = VG_(do_syscall1)(__NR_uname, (UWord)&uts);
+         if (sr_isError(res))
+            VG_(message)(Vg_DebugMsg, "  uname() failed\n");
+         else
+            VG_(message)(Vg_DebugMsg, "  %s %s %s %s\n",
+                         uts.sysname, uts.release, uts.version, uts.machine);
+      }
 # endif
 
       VG_(machine_get_VexArchInfo)( &vex_arch, &vex_archinfo );
@@ -1945,7 +1959,7 @@
    if (!need_help) {
       VG_(debugLog)(1, "main", "Create initial image\n");
 
-#     if defined(VGO_linux) || defined(VGO_darwin)
+#     if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
       the_iicii.argv              = argv;
       the_iicii.envp              = envp;
       the_iicii.toolname          = toolname;
@@ -1978,16 +1992,21 @@
    //   p: setup file descriptors
    //   p: ii_create_image for VG_(client_auxv) setup.
    //--------------------------------------------------------------
-#if !defined(VGO_linux)
-   // client shouldn't be using /proc!
    VG_(cl_cmdline_fd) = -1;
    VG_(cl_auxv_fd) = -1;
-#else
+#if defined(VGO_solaris)
+   VG_(cl_psinfo_fd) = -1;
+#endif
+
+#if defined(VGO_linux) || defined(VGO_solaris)
    if (!need_help) {
       HChar  buf[50];   // large enough
       HChar  buf2[VG_(mkstemp_fullname_bufsz)(sizeof buf - 1)];
-      HChar  nul[1];
       Int    fd, r;
+
+#if defined(VGO_linux)
+      /* Fake /proc/<pid>/cmdline only on Linux. */
+      HChar  nul[1];
       const HChar* exename;
 
       VG_(debugLog)(1, "main", "Create fake /proc/<pid>/cmdline\n");
@@ -2018,7 +2037,9 @@
          VG_(err_config_error)("Can't delete client cmdline file in %s\n", buf2);
 
       VG_(cl_cmdline_fd) = fd;
+#endif // defined(VGO_linux)
 
+      /* Fake /proc/<pid>/auxv on both Linux and Solaris. */
       VG_(debugLog)(1, "main", "Create fake /proc/<pid>/auxv\n");
 
       VG_(sprintf)(buf, "proc_%d_auxv", VG_(getpid)());
@@ -2047,6 +2068,24 @@
          VG_(err_config_error)("Can't delete client auxv file in %s\n", buf2);
 
       VG_(cl_auxv_fd) = fd;
+
+#if defined(VGO_solaris)
+      /* Fake /proc/<pid>/psinfo on Solaris.
+       * Contents will be fetched and partially faked later on the fly. */
+      VG_(debugLog)(1, "main", "Create fake /proc/<pid>/psinfo\n");
+
+      VG_(sprintf)(buf, "proc_%d_psinfo", VG_(getpid)());
+      fd = VG_(mkstemp)( buf, buf2 );
+      if (fd == -1)
+         VG_(err_config_error)("Can't create client psinfo file in %s\n", buf2);
+
+      /* Now delete it, but hang on to the fd. */
+      r = VG_(unlink)( buf2 );
+      if (r)
+         VG_(err_config_error)("Can't delete client psinfo file in %s\n", buf2);
+
+      VG_(cl_psinfo_fd) = fd;
+#endif /* VGO_solaris */
    }
 #endif
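All of the fake /proc files above use the same create-then-unlink idiom: the temporary file is deleted right away, but the open descriptor keeps the now-anonymous file alive. A plain-libc sketch of that idiom (illustrative only; the path template is made up):

   #include <stdlib.h>
   #include <unistd.h>

   /* Sketch only: obtain an fd backed by an already-deleted temporary file. */
   static int anonymous_fd(void)
   {
      char path[] = "/tmp/proc_fake_XXXXXX";
      int fd = mkstemp(path);   /* create a unique temporary file */
      if (fd == -1)
         return -1;
      unlink(path);             /* delete it; the open fd keeps it alive */
      return fd;
   }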
 
@@ -2165,6 +2204,8 @@
       iters = 10;
 #     elif defined(VGO_darwin)
       iters = 3;
+#     elif defined(VGO_solaris)
+      iters = 10;
 #     else
 #       error "Unknown plat"
 #     endif
@@ -2183,6 +2224,10 @@
       VG_(init_preopened_fds)();
    }
 
+#if defined(VGO_solaris)
+   VG_(syswrap_init)();
+#endif
+
    //--------------------------------------------------------------
    // Load debug info for the existing segments.
    //   p: setup_code_redirect_table [so that redirs can be recorded]
@@ -2207,7 +2252,7 @@
    addr2dihandle = VG_(newXA)( VG_(malloc), "main.vm.2",
                                VG_(free), sizeof(Addr_n_ULong) );
 
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    { Addr* seg_starts;
      Int   n_seg_starts;
      Addr_n_ULong anu;
@@ -2445,7 +2490,19 @@
    //      VG_(ii_create_image)   [for 'the_iicii' initial info]
    //--------------------------------------------------------------
    VG_(debugLog)(1, "main", "Finalise initial image\n");
-   VG_(ii_finalise_image)( the_iifii );
+   { /* Mark the main thread as running while we tell the tool about
+        the client memory which could be tracked during initial image
+        finalisation, so that the tool can associate that memory with
+        the main thread. */
+     vg_assert(VG_(running_tid) == VG_INVALID_THREADID);
+     VG_(running_tid) = tid_main;
+
+     VG_(ii_finalise_image)( the_iifii );
+
+     /* Clear the running thread indicator */
+     VG_(running_tid) = VG_INVALID_THREADID;
+     vg_assert(VG_(running_tid) == VG_INVALID_THREADID);
+   }
 
    //--------------------------------------------------------------
    // Initialise the signal handling subsystem
@@ -2665,7 +2722,7 @@
                     "VG_(terminate_NORETURN)(tid=%lld)\n", (ULong)tid);
 
    switch (tids_schedretcode) {
-   case VgSrc_ExitThread:  /* the normal way out (Linux) */
+   case VgSrc_ExitThread:  /* the normal way out (Linux, Solaris) */
    case VgSrc_ExitProcess: /* the normal way out (Darwin) */
       /* Change the application return code to user's return code,
          if an error was found */
@@ -3431,13 +3488,92 @@
    VG_(exit)(r);
 }
 
+/*====================================================================*/
+/*=== Getting to main() alive: Solaris                             ===*/
+/*====================================================================*/
+#elif defined(VGO_solaris)
+#if defined(VGP_x86_solaris)
+/* The kernel hands control to _start, which extracts the initial stack
+   pointer and calls onwards to _start_in_C_solaris.  This also switches to
+   the new stack. */
+asm("\n"
+    "\t.text\n"
+    "\t.globl _start\n"
+    "\t.type _start, @function\n"
+    "_start:\n"
+    /* Set up the new stack in %eax. */
+    "\tmovl  $vgPlain_interim_stack, %eax\n"
+    "\taddl  $"VG_STRINGIFY(VG_STACK_GUARD_SZB)", %eax\n"
+    "\taddl  $"VG_STRINGIFY(VG_DEFAULT_STACK_ACTIVE_SZB)", %eax\n"
+    "\tandl  $~15, %eax\n"
+    /* Install it, and collect the original one. */
+    "\txchgl %eax, %esp\n"
+    "\tsubl  $12, %esp\n"  /* Keep stack 16-byte aligned. */
+    /* Call _start_in_C_solaris, passing it the startup %esp. */
+    "\tpushl %eax\n"
+    "\tcall  _start_in_C_solaris\n"
+    /* NOTREACHED */
+    "\thlt\n"
+    "\t.previous\n"
+);
+#elif defined(VGP_amd64_solaris)
+asm("\n"
+    ".text\n"
+    "\t.globl _start\n"
+    "\t.type _start, @function\n"
+    "_start:\n"
+    /* Set up the new stack in %rdi. */
+    "\tmovq  $vgPlain_interim_stack, %rdi\n"
+    "\taddq  $"VG_STRINGIFY(VG_STACK_GUARD_SZB)", %rdi\n"
+    "\taddq  $"VG_STRINGIFY(VG_DEFAULT_STACK_ACTIVE_SZB)", %rdi\n"
+    "\tandq  $~15, %rdi\n"
+    /* Install it, and collect the original one. */
+    "\txchgq %rdi, %rsp\n"
+    /* Call _start_in_C_solaris, passing it the startup %rsp. */
+    "\tcall  _start_in_C_solaris\n"
+    /* NOTREACHED */
+    "\thlt\n"
+    ".previous\n"
+);
+#else
+#  error "Unknown Solaris platform"
+#endif
+
+void *memcpy(void *dest, const void *src, size_t n);
+void *memcpy(void *dest, const void *src, size_t n) {
+   return VG_(memcpy)(dest, src, n);
+}
+
+__attribute__ ((used))
+void _start_in_C_solaris ( UWord* pArgc );
+__attribute__ ((used))
+void _start_in_C_solaris ( UWord* pArgc )
+{
+   Int     r;
+   Word    argc = pArgc[0];
+   HChar** argv = (HChar**)&pArgc[1];
+   HChar** envp = (HChar**)&pArgc[1 + argc + 1];
+
+   VG_(memset)( &the_iicii, 0, sizeof(the_iicii) );
+   VG_(memset)( &the_iifii, 0, sizeof(the_iifii) );
+
+   the_iicii.sp_at_startup = (Addr)pArgc;
+
+   r = valgrind_main((Int)argc, argv, envp);
+   /* NOTREACHED */
+   VG_(exit)(r);
+}
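The pointer handed to _start_in_C_solaris points at the usual initial-stack block: argc, then the argv pointers, a NULL, the envp pointers and another NULL (with the auxv entries following). A short sketch of walking it (illustrative only):

   /* Sketch only: walk the initial stack block that pArgc points at. */
   static void walk_initial_stack(UWord *pArgc)
   {
      Word    argc = pArgc[0];
      HChar **argv = (HChar**)&pArgc[1];             /* argv, then NULL */
      HChar **envp = (HChar**)&pArgc[1 + argc + 1];  /* NULL-terminated */

      for (Word i = 0; i < argc; i++)
         VG_(debugLog)(2, "main", "argv[%d] = %s\n", (Int)i, argv[i]);
      for (HChar **p = envp; *p != NULL; p++)
         VG_(debugLog)(2, "main", "env: %s\n", *p);
   }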
 
 #else
-
 #  error "Unknown OS"
 #endif
 
 
+Addr VG_(get_initial_client_SP)( void )
+{
+   return the_iifii.initial_client_SP;
+}
+
 /*====================================================================*/
 /*=== {u,}{div,mod}di3 replacements                                ===*/
 /*====================================================================*/
diff --git a/coregrind/m_options.c b/coregrind/m_options.c
index 24938aa..300d620 100644
--- a/coregrind/m_options.c
+++ b/coregrind/m_options.c
@@ -300,7 +300,7 @@
    return c;
 }
 
-/* Should we trace into this child executable (across execve etc) ?
+/* Should we trace into this child executable (across execve, spawn etc) ?
    This involves considering --trace-children=,
    --trace-children-skip=, --trace-children-skip-by-arg=, and the name
    of the executable.  'child_argv' must not include the name of the
diff --git a/coregrind/m_redir.c b/coregrind/m_redir.c
index 09e6a9f..eaad1f8 100644
--- a/coregrind/m_redir.c
+++ b/coregrind/m_redir.c
@@ -406,6 +406,11 @@
    const HChar* const pthread_soname = "libpthread.so.0";
    const HChar* const pthread_stack_cache_actsize_varname
       = "stack_cache_actsize";
+#if defined(VGO_solaris)
+   Bool         vg_vfork_fildes_var_search = False;
+   const HChar* const vg_preload_core_soname = "vgpreload_core.so.0";
+   const HChar* const vg_vfork_fildes_varname = "vg_vfork_fildes";
+#endif
 
 #  if defined(VG_PLAT_USES_PPCTOC)
    check_ppcTOCs = True;
@@ -504,6 +509,11 @@
       SimHintiS(SimHint_no_nptl_pthread_stackcache, VG_(clo_sim_hints))
       && 0 == VG_(strcmp)(newdi_soname, pthread_soname);
 
+#if defined(VGO_solaris)
+   vg_vfork_fildes_var_search =
+      0 == VG_(strcmp)(newdi_soname, vg_preload_core_soname);
+#endif
+
    nsyms = VG_(DebugInfo_syms_howmany)( newdi );
    for (i = 0; i < nsyms; i++) {
       VG_(DebugInfo_syms_getidx)( newdi, i, &sym_avmas,
@@ -534,6 +544,18 @@
                VG_(client__stack_cache_actsize__addr) = (SizeT*) sym_avmas.main;
                dehacktivate_pthread_stack_cache_var_search = False;
             }
+#if defined(VGO_solaris)
+            if (vg_vfork_fildes_var_search
+                && 0 == VG_(strcmp)(*names, vg_vfork_fildes_varname)) {
+               if ( VG_(clo_verbosity) > 1 ) {
+                  VG_(message)( Vg_DebugMsg,
+                                "vfork kludge: found symbol %s at addr %p\n",
+                                *names, (void*) sym_avmas.main);
+               }
+               VG_(vfork_fildes_addr) = (Int*) sym_avmas.main;
+               vg_vfork_fildes_var_search = False;
+            }
+#endif
             continue;
          }
          if (!ok) {
@@ -617,6 +639,15 @@
       VG_(message)(Vg_DebugMsg,
                    "=> pthread stack cache cannot be disabled!\n");
    }
+#if defined(VGO_solaris)
+   if (vg_vfork_fildes_var_search) {
+      VG_(message)(Vg_DebugMsg,
+                   "WARNING: could not find symbol for var %s in %s\n",
+                   vg_vfork_fildes_varname, vg_preload_core_soname);
+      VG_(message)(Vg_DebugMsg,
+                   "=> posix_spawn() will not work correctly!\n");
+   }
+#endif
 
    if (check_ppcTOCs) {
       for (i = 0; i < nsyms; i++) {
@@ -1500,6 +1531,40 @@
       );
    }
 
+#  elif defined(VGP_x86_solaris)
+   /* If we're using memcheck, use these intercepts right from
+      the start, otherwise ld.so makes a lot of noise. */
+   if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+      add_hardwired_spec("/lib/ld.so.1", "strcmp",
+                         (Addr)&VG_(x86_solaris_REDIR_FOR_strcmp), NULL);
+   }
+   if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+      add_hardwired_spec("/lib/ld.so.1", "strlen",
+                         (Addr)&VG_(x86_solaris_REDIR_FOR_strlen), NULL);
+   }
+
+#  elif defined(VGP_amd64_solaris)
+   if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+      add_hardwired_spec("/lib/amd64/ld.so.1", "strcpy",
+                         (Addr)&VG_(amd64_solaris_REDIR_FOR_strcpy), NULL);
+   }
+   if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+      add_hardwired_spec("/lib/amd64/ld.so.1", "strncpy",
+                         (Addr)&VG_(amd64_solaris_REDIR_FOR_strncpy), NULL);
+   }
+   if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+      add_hardwired_spec("/lib/amd64/ld.so.1", "strcmp",
+                         (Addr)&VG_(amd64_solaris_REDIR_FOR_strcmp), NULL);
+   }
+   if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+      add_hardwired_spec("/lib/amd64/ld.so.1", "strcat",
+                         (Addr)&VG_(amd64_solaris_REDIR_FOR_strcat), NULL);
+   }
+   if (0==VG_(strcmp)("Memcheck", VG_(details).name)) {
+      add_hardwired_spec("/lib/amd64/ld.so.1", "strlen",
+                         (Addr)&VG_(amd64_solaris_REDIR_FOR_strlen), NULL);
+   }
+
 #  else
 #    error Unknown platform
 #  endif
diff --git a/coregrind/m_replacemalloc/vg_replace_malloc.c b/coregrind/m_replacemalloc/vg_replace_malloc.c
index fefb8e9..76efc10 100644
--- a/coregrind/m_replacemalloc/vg_replace_malloc.c
+++ b/coregrind/m_replacemalloc/vg_replace_malloc.c
@@ -305,6 +305,12 @@
  ZONEALLOC_or_NULL(VG_Z_LIBC_SONAME,  malloc_zone_malloc, malloc);
  ZONEALLOC_or_NULL(SO_SYN_MALLOC,     malloc_zone_malloc, malloc);
 
+#elif defined(VGO_solaris)
+ ALLOC_or_NULL(VG_Z_LIBSTDCXX_SONAME, malloc,      malloc);
+ ALLOC_or_NULL(VG_Z_LIBC_SONAME,      malloc,      malloc);
+ ALLOC_or_NULL(VG_Z_LIBUMEM_SO_1,     malloc,      malloc);
+ ALLOC_or_NULL(SO_SYN_MALLOC,         malloc,      malloc);
+
 #endif
 
 
@@ -341,6 +347,18 @@
   //ALLOC_or_BOMB(VG_Z_LIBC_SONAME,      _Znwm,          __builtin_new);
  #endif
 
+#elif defined(VGO_solaris)
+ // operator new(unsigned int), GNU mangling
+ #if VG_WORDSIZE == 4
+  ALLOC_or_BOMB(VG_Z_LIBSTDCXX_SONAME, _Znwj,          __builtin_new);
+  ALLOC_or_BOMB(SO_SYN_MALLOC,         _Znwj,          __builtin_new);
+ #endif
+ // operator new(unsigned long), GNU mangling
+ #if VG_WORDSIZE == 8
+  ALLOC_or_BOMB(VG_Z_LIBSTDCXX_SONAME, _Znwm,          __builtin_new);
+  ALLOC_or_BOMB(SO_SYN_MALLOC,         _Znwm,          __builtin_new);
+ #endif
+
 #endif
 
 
@@ -372,6 +390,18 @@
   //ALLOC_or_NULL(VG_Z_LIBC_SONAME,      _ZnwmRKSt9nothrow_t,  __builtin_new);
  #endif
 
+#elif defined(VGO_solaris)
+ // operator new(unsigned, std::nothrow_t const&), GNU mangling
+ #if VG_WORDSIZE == 4
+  ALLOC_or_NULL(VG_Z_LIBSTDCXX_SONAME, _ZnwjRKSt9nothrow_t,  __builtin_new);
+  ALLOC_or_NULL(SO_SYN_MALLOC,         _ZnwjRKSt9nothrow_t,  __builtin_new);
+ #endif
+ // operator new(unsigned long, std::nothrow_t const&), GNU mangling
+ #if VG_WORDSIZE == 8
+  ALLOC_or_NULL(VG_Z_LIBSTDCXX_SONAME, _ZnwmRKSt9nothrow_t,  __builtin_new);
+  ALLOC_or_NULL(SO_SYN_MALLOC,         _ZnwmRKSt9nothrow_t,  __builtin_new);
+ #endif
+
 #endif
 
 
@@ -406,6 +436,18 @@
   //ALLOC_or_BOMB(VG_Z_LIBC_SONAME,      _Znam,             __builtin_vec_new );
  #endif
 
+#elif defined(VGO_solaris)
+ // operator new[](unsigned int), GNU mangling
+ #if VG_WORDSIZE == 4
+  ALLOC_or_BOMB(VG_Z_LIBSTDCXX_SONAME, _Znaj,             __builtin_vec_new );
+  ALLOC_or_BOMB(SO_SYN_MALLOC,         _Znaj,             __builtin_vec_new );
+ #endif
+ // operator new[](unsigned long), GNU mangling
+ #if VG_WORDSIZE == 8
+  ALLOC_or_BOMB(VG_Z_LIBSTDCXX_SONAME, _Znam,             __builtin_vec_new );
+  ALLOC_or_BOMB(SO_SYN_MALLOC,         _Znam,             __builtin_vec_new );
+ #endif
+
 #endif
 
 
@@ -437,6 +479,18 @@
   //ALLOC_or_NULL(VG_Z_LIBC_SONAME,      _ZnamRKSt9nothrow_t, __builtin_vec_new );
  #endif
 
+#elif defined(VGO_solaris)
+ // operator new[](unsigned, std::nothrow_t const&), GNU mangling
+ #if VG_WORDSIZE == 4
+  ALLOC_or_NULL(VG_Z_LIBSTDCXX_SONAME, _ZnajRKSt9nothrow_t, __builtin_vec_new );
+  ALLOC_or_NULL(SO_SYN_MALLOC,         _ZnajRKSt9nothrow_t, __builtin_vec_new );
+ #endif
+ // operator new[](unsigned long, std::nothrow_t const&), GNU mangling
+ #if VG_WORDSIZE == 8
+  ALLOC_or_NULL(VG_Z_LIBSTDCXX_SONAME, _ZnamRKSt9nothrow_t, __builtin_vec_new );
+  ALLOC_or_NULL(SO_SYN_MALLOC,         _ZnamRKSt9nothrow_t, __builtin_vec_new );
+ #endif
+
 #endif
 
 
@@ -482,6 +536,11 @@
  ZONEFREE(VG_Z_LIBC_SONAME,   malloc_zone_free,     free );
  ZONEFREE(SO_SYN_MALLOC,      malloc_zone_free,     free );
 
+#elif defined(VGO_solaris)
+ FREE(VG_Z_LIBC_SONAME,       free,                 free );
+ FREE(VG_Z_LIBUMEM_SO_1,      free,                 free );
+ FREE(SO_SYN_MALLOC,          free,                 free );
+
 #endif
 
 
@@ -497,6 +556,12 @@
  //FREE(VG_Z_LIBSTDCXX_SONAME,  cfree,                free );
  //FREE(VG_Z_LIBC_SONAME,       cfree,                free );
 
+#elif defined(VGO_solaris)
+ FREE(VG_Z_LIBC_SONAME,       cfree,                free );
+ /* libumem does not implement cfree(). */
+ //FREE(VG_Z_LIBUMEM_SO_1,      cfree,                free );
+ FREE(SO_SYN_MALLOC,          cfree,                free );
+
 #endif
 
 
@@ -516,6 +581,11 @@
  //FREE(VG_Z_LIBSTDCXX_SONAME,  _ZdlPv,               __builtin_delete );
  //FREE(VG_Z_LIBC_SONAME,       _ZdlPv,               __builtin_delete );
 
+#elif defined(VGO_solaris)
+ // operator delete(void*), GNU mangling
+ FREE(VG_Z_LIBSTDCXX_SONAME,  _ZdlPv,               __builtin_delete );
+ FREE(SO_SYN_MALLOC,          _ZdlPv,               __builtin_delete );
+
 #endif
 
 
@@ -532,6 +602,11 @@
  //FREE(VG_Z_LIBSTDCXX_SONAME, _ZdlPvRKSt9nothrow_t,  __builtin_delete );
  //FREE(VG_Z_LIBC_SONAME,      _ZdlPvRKSt9nothrow_t,  __builtin_delete );
 
+#elif defined(VGO_solaris)
+ // operator delete(void*, std::nothrow_t const&), GNU mangling
+ FREE(VG_Z_LIBSTDCXX_SONAME, _ZdlPvRKSt9nothrow_t,  __builtin_delete );
+ FREE(SO_SYN_MALLOC,         _ZdlPvRKSt9nothrow_t,  __builtin_delete );
+
 #endif
 
 
@@ -554,6 +629,11 @@
  //FREE(VG_Z_LIBSTDCXX_SONAME,  _ZdaPv,               __builtin_vec_delete );
  //FREE(VG_Z_LIBC_SONAME,       _ZdaPv,               __builtin_vec_delete );
 
+#elif defined(VGO_solaris)
+ // operator delete[](void*), GNU mangling
+ FREE(VG_Z_LIBSTDCXX_SONAME,  _ZdaPv,               __builtin_vec_delete );
+ FREE(SO_SYN_MALLOC,          _ZdaPv,               __builtin_vec_delete );
+
 #endif
 
 
@@ -570,6 +650,11 @@
  //FREE(VG_Z_LIBSTDCXX_SONAME,  _ZdaPvRKSt9nothrow_t, __builtin_vec_delete );
  //FREE(VG_Z_LIBC_SONAME,       _ZdaPvRKSt9nothrow_t, __builtin_vec_delete );
 
+#elif defined(VGO_solaris)
+ // operator delete[](void*, std::nothrow_t const&), GNU mangling
+ FREE(VG_Z_LIBSTDCXX_SONAME,  _ZdaPvRKSt9nothrow_t, __builtin_vec_delete );
+ FREE(SO_SYN_MALLOC,          _ZdaPvRKSt9nothrow_t, __builtin_vec_delete );
+
 #endif
 
 
@@ -632,6 +717,11 @@
  ZONECALLOC(VG_Z_LIBC_SONAME, malloc_zone_calloc);
  ZONECALLOC(SO_SYN_MALLOC,    malloc_zone_calloc);
 
+#elif defined(VGO_solaris)
+ CALLOC(VG_Z_LIBC_SONAME,      calloc);
+ CALLOC(VG_Z_LIBUMEM_SO_1,     calloc);
+ CALLOC(SO_SYN_MALLOC,         calloc);
+
 #endif
 
 
@@ -701,6 +791,11 @@
  ZONEREALLOC(VG_Z_LIBC_SONAME, malloc_zone_realloc);
  ZONEREALLOC(SO_SYN_MALLOC,    malloc_zone_realloc);
 
+#elif defined(VGO_solaris)
+ REALLOC(VG_Z_LIBC_SONAME,      realloc);
+ REALLOC(VG_Z_LIBUMEM_SO_1,     realloc);
+ REALLOC(SO_SYN_MALLOC,         realloc);
+
 #endif
 
 
@@ -769,6 +864,11 @@
  ZONEMEMALIGN(VG_Z_LIBC_SONAME, malloc_zone_memalign);
  ZONEMEMALIGN(SO_SYN_MALLOC,    malloc_zone_memalign);
 
+#elif defined(VGO_solaris)
+ MEMALIGN(VG_Z_LIBC_SONAME,      memalign);
+ MEMALIGN(VG_Z_LIBUMEM_SO_1,     memalign);
+ MEMALIGN(SO_SYN_MALLOC,         memalign);
+
 #endif
 
 
@@ -811,6 +911,11 @@
  ZONEVALLOC(VG_Z_LIBC_SONAME, malloc_zone_valloc);
  ZONEVALLOC(SO_SYN_MALLOC,    malloc_zone_valloc);
 
+#elif defined(VGO_solaris)
+ VALLOC(VG_Z_LIBC_SONAME,      valloc);
+ VALLOC(VG_Z_LIBUMEM_SO_1,     valloc);
+ VALLOC(SO_SYN_MALLOC,         valloc);
+
 #endif
 
 
@@ -919,6 +1024,10 @@
 #elif defined(VGO_darwin)
  //POSIX_MEMALIGN(VG_Z_LIBC_SONAME, posix_memalign);
 
+#elif defined(VGO_solaris)
+ POSIX_MEMALIGN(VG_Z_LIBC_SONAME, posix_memalign);
+ POSIX_MEMALIGN(SO_SYN_MALLOC,    posix_memalign);
+
 #endif
 
 
@@ -965,6 +1074,7 @@
 
 /* Bomb out if we get any of these. */
 
+static void panic(const char *str) __attribute__((unused));
 static void panic(const char *str)
 {
    VALGRIND_PRINTF_BACKTRACE("Program aborting because of call to %s\n", str);
diff --git a/coregrind/m_scheduler/scheduler.c b/coregrind/m_scheduler/scheduler.c
index 80fdf6a..62906ca 100644
--- a/coregrind/m_scheduler/scheduler.c
+++ b/coregrind/m_scheduler/scheduler.c
@@ -218,6 +218,8 @@
       case VEX_TRC_JMP_SYS_INT128:     return "INT128";
       case VEX_TRC_JMP_SYS_INT129:     return "INT129";
       case VEX_TRC_JMP_SYS_INT130:     return "INT130";
+      case VEX_TRC_JMP_SYS_INT145:     return "INT145";
+      case VEX_TRC_JMP_SYS_INT210:     return "INT210";
       case VEX_TRC_JMP_SYS_SYSENTER:   return "SYSENTER";
       case VEX_TRC_JMP_BORING:         return "VEX_BORING";
 
@@ -448,7 +450,13 @@
    /* 
       Tell the kernel we're yielding.
     */
+#  if defined(VGO_linux) || defined(VGO_darwin)
    VG_(do_syscall0)(__NR_sched_yield);
+#  elif defined(VGO_solaris)
+   VG_(do_syscall0)(__NR_yield);
+#  else
+#    error Unknown OS
+#  endif
 
    VG_(acquire_BigLock)(tid, "VG_(vg_yield)");
 }
@@ -492,6 +500,17 @@
    tst->os_state.remote_port       = 0;
    tst->os_state.msgh_id           = 0;
    VG_(memset)(&tst->os_state.mach_args, 0, sizeof(tst->os_state.mach_args));
+#  elif defined(VGO_solaris)
+#  if defined(VGP_x86_solaris)
+   tst->os_state.thrptr = 0;
+#  endif
+   tst->os_state.stk_id = (UWord)-1;
+   tst->os_state.ustack = NULL;
+   tst->os_state.in_door_return = False;
+   tst->os_state.door_return_procedure = 0;
+   tst->os_state.oldcontext = NULL;
+   tst->os_state.schedctl_data = 0;
+   tst->os_state.daemon_thread = False;
 #  else
 #    error "Unknown OS"
 #  endif
@@ -1409,7 +1428,10 @@
       case VEX_TRC_JMP_SYS_INT128:  /* x86-linux */
       case VEX_TRC_JMP_SYS_INT129:  /* x86-darwin */
       case VEX_TRC_JMP_SYS_INT130:  /* x86-darwin */
-      case VEX_TRC_JMP_SYS_SYSCALL: /* amd64-linux, ppc32-linux, amd64-darwin */
+      case VEX_TRC_JMP_SYS_INT145:  /* x86-solaris */
+      case VEX_TRC_JMP_SYS_INT210:  /* x86-solaris */
+      /* amd64-linux, ppc32-linux, amd64-darwin, amd64-solaris */
+      case VEX_TRC_JMP_SYS_SYSCALL:
 	 handle_syscall(tid, trc[0]);
 	 if (VG_(clo_sanity_level) > 2)
 	    VG_(sanity_check_general)(True); /* sanity-check every syscall */
@@ -1599,7 +1621,7 @@
 #        if defined(VGP_x86_linux)
          vg_assert2(0, "VG_(scheduler), phase 3: "
                        "sysenter_x86 on x86-linux is not supported");
-#        elif defined(VGP_x86_darwin)
+#        elif defined(VGP_x86_darwin) || defined(VGP_x86_solaris)
          /* return address in client edx */
          VG_(threads)[tid].arch.vex.guest_EIP
             = VG_(threads)[tid].arch.vex.guest_EDX;
diff --git a/coregrind/m_sigframe/sigframe-amd64-darwin.c b/coregrind/m_sigframe/sigframe-amd64-darwin.c
index 53014b8..803526b 100644
--- a/coregrind/m_sigframe/sigframe-amd64-darwin.c
+++ b/coregrind/m_sigframe/sigframe-amd64-darwin.c
@@ -145,6 +145,7 @@
    former case, the amd64 calling conventions will simply cause the
    extra 2 args to be ignored (inside the handler).  (We hope!) */
 void VG_(sigframe_create) ( ThreadId tid,
+                            Bool on_altstack,
                             Addr sp_top_of_frame,
                             const vki_siginfo_t *siginfo,
                             const struct vki_ucontext *siguc,
diff --git a/coregrind/m_sigframe/sigframe-amd64-linux.c b/coregrind/m_sigframe/sigframe-amd64-linux.c
index 0c1c7b4..761f9b4 100644
--- a/coregrind/m_sigframe/sigframe-amd64-linux.c
+++ b/coregrind/m_sigframe/sigframe-amd64-linux.c
@@ -451,6 +451,7 @@
 
 
 void VG_(sigframe_create)( ThreadId tid, 
+                            Bool on_altstack,
                             Addr rsp_top_of_frame,
                             const vki_siginfo_t *siginfo,
                             const struct vki_ucontext *siguc,
diff --git a/coregrind/m_sigframe/sigframe-arm-linux.c b/coregrind/m_sigframe/sigframe-arm-linux.c
index add7c82..01dbfbe 100644
--- a/coregrind/m_sigframe/sigframe-arm-linux.c
+++ b/coregrind/m_sigframe/sigframe-arm-linux.c
@@ -161,6 +161,7 @@
 
 /* EXPORTED */
 void VG_(sigframe_create)( ThreadId tid, 
+                           Bool on_altstack,
                            Addr sp_top_of_frame,
                            const vki_siginfo_t *siginfo,
                            const struct vki_ucontext *siguc,
diff --git a/coregrind/m_sigframe/sigframe-arm64-linux.c b/coregrind/m_sigframe/sigframe-arm64-linux.c
index 6e80659..c248585 100644
--- a/coregrind/m_sigframe/sigframe-arm64-linux.c
+++ b/coregrind/m_sigframe/sigframe-arm64-linux.c
@@ -152,6 +152,7 @@
 
 /* EXPORTED */
 void VG_(sigframe_create)( ThreadId tid, 
+                           Bool on_altstack,
                            Addr sp_top_of_frame,
                            const vki_siginfo_t *siginfo,
                            const struct vki_ucontext *siguc,
diff --git a/coregrind/m_sigframe/sigframe-common.c b/coregrind/m_sigframe/sigframe-common.c
index 48bd247..97419d6 100644
--- a/coregrind/m_sigframe/sigframe-common.c
+++ b/coregrind/m_sigframe/sigframe-common.c
@@ -49,7 +49,7 @@
    VG_TRACK( new_mem_stack_signal, addr - VG_STACK_REDZONE_SZB, size, tid );
 }
 
-#if defined(VGO_linux)
+#if defined(VGO_linux) || defined(VGO_solaris)
 
 /* Extend the stack segment downwards if needed so as to ensure the
    new signal frames are mapped to something.  Return a Bool
diff --git a/coregrind/m_sigframe/sigframe-mips32-linux.c b/coregrind/m_sigframe/sigframe-mips32-linux.c
index 4b23dc7..1e4f9ac 100644
--- a/coregrind/m_sigframe/sigframe-mips32-linux.c
+++ b/coregrind/m_sigframe/sigframe-mips32-linux.c
@@ -121,6 +121,7 @@
 
 /* EXPORTED */
 void VG_(sigframe_create)( ThreadId tid, 
+                           Bool on_altstack,
                            Addr sp_top_of_frame,
                            const vki_siginfo_t *siginfo,
                            const struct vki_ucontext *siguc,
diff --git a/coregrind/m_sigframe/sigframe-mips64-linux.c b/coregrind/m_sigframe/sigframe-mips64-linux.c
index 5e0f44a..c1f2637 100644
--- a/coregrind/m_sigframe/sigframe-mips64-linux.c
+++ b/coregrind/m_sigframe/sigframe-mips64-linux.c
@@ -116,6 +116,7 @@
 
 /* EXPORTED */
 void VG_(sigframe_create) ( ThreadId tid,
+                            Bool on_altstack,
                             Addr sp_top_of_frame,
                             const vki_siginfo_t *siginfo,
                             const struct vki_ucontext *siguc,
diff --git a/coregrind/m_sigframe/sigframe-ppc32-linux.c b/coregrind/m_sigframe/sigframe-ppc32-linux.c
index 8047f27..504480e 100644
--- a/coregrind/m_sigframe/sigframe-ppc32-linux.c
+++ b/coregrind/m_sigframe/sigframe-ppc32-linux.c
@@ -624,6 +624,7 @@
 
 /* EXPORTED */
 void VG_(sigframe_create)( ThreadId tid, 
+                           Bool on_altstack,
                            Addr sp_top_of_frame,
                            const vki_siginfo_t *siginfo,
                            const struct vki_ucontext *siguc,
diff --git a/coregrind/m_sigframe/sigframe-ppc64-linux.c b/coregrind/m_sigframe/sigframe-ppc64-linux.c
index ec4d883..5a7fa89 100644
--- a/coregrind/m_sigframe/sigframe-ppc64-linux.c
+++ b/coregrind/m_sigframe/sigframe-ppc64-linux.c
@@ -134,6 +134,7 @@
 
 /* EXPORTED */
 void VG_(sigframe_create)( ThreadId tid, 
+                           Bool on_altstack,
                            Addr sp_top_of_frame,
                            const vki_siginfo_t *siginfo,
                            const struct vki_ucontext *siguc,
diff --git a/coregrind/m_sigframe/sigframe-s390x-linux.c b/coregrind/m_sigframe/sigframe-s390x-linux.c
index fb70c76..2e1ad0c 100644
--- a/coregrind/m_sigframe/sigframe-s390x-linux.c
+++ b/coregrind/m_sigframe/sigframe-s390x-linux.c
@@ -403,6 +403,7 @@
 
 /* EXPORTED */
 void VG_(sigframe_create)( ThreadId tid,
+			   Bool on_altstack,
 			   Addr sp_top_of_frame,
 			   const vki_siginfo_t *siginfo,
 			   const struct vki_ucontext *siguc,
diff --git a/coregrind/m_sigframe/sigframe-solaris.c b/coregrind/m_sigframe/sigframe-solaris.c
new file mode 100644
index 0000000..2e28bd5
--- /dev/null
+++ b/coregrind/m_sigframe/sigframe-solaris.c
@@ -0,0 +1,270 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Create/destroy signal delivery frames.                       ---*/
+/*---                                           sigframe-solaris.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2011-2014 Petr Pavlu
+      setup@dagobah.cz
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_x86_solaris) || defined(VGP_amd64_solaris)
+
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_machine.h"
+#include "pub_core_options.h"
+#include "pub_core_signals.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_sigframe.h"      /* Self */
+#include "pub_core_syswrap.h"
+#include "priv_sigframe.h"
+
+/* This module creates and removes signal frames for signal deliveries
+   on x86/amd64-solaris. */
+
+/* Create a signal frame for thread 'tid'.  Make a 3-arg frame regardless of
+   whether the client originally requested a 1-arg version (no SA_SIGINFO) or
+   a 3-arg one (SA_SIGINFO) since in the former case, the x86/amd64 calling
+   conventions will simply cause the extra 2 args to be ignored (inside the
+   handler). */
+void VG_(sigframe_create)(ThreadId tid, Bool on_altstack,
+                          Addr sp_top_of_frame, const vki_siginfo_t *siginfo,
+                          const struct vki_ucontext *siguc,
+                          void *handler, UInt flags, const vki_sigset_t *mask,
+                          void *restorer)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   Addr esp;
+   vki_sigframe_t *frame;
+   Int signo = siginfo->si_signo;
+
+   /* Calculate new stack pointer. */
+   esp = sp_top_of_frame - sizeof(vki_sigframe_t);
+   esp = VG_ROUNDDN(esp, 16) - sizeof(UWord);
+
+   if (!ML_(sf_maybe_extend_stack)(tst, esp, sp_top_of_frame - esp, flags))
+      return;
+
+   /* Fill in the frame. */
+   frame = (vki_sigframe_t*)esp;
+
+   /* Set a bogus return address.  This return address should never be used
+      because, to return from a signal handler, a program has to call
+      setcontext() explicitly. */
+   frame->return_addr = (void*)~0UL;
+
+   /* Save current context.  (This has to be done before the thread state is
+      modified in any way.) */
+   VG_(save_context)(tid, &frame->ucontext, Vg_CoreSignal);
+
+   /* Fill in the siginfo. */
+   frame->siginfo = *siginfo;
+   /* Set expected si_addr value.
+
+      The manual page siginfo.h(3HEAD) states that some signals define si_addr
+      as the address of the faulting instruction (SIGILL); in that case the
+      real CPU address has to be translated to the VCPU address. Other signals
+      define si_addr as the address of the faulting memory reference (SIGSEGV,
+      SIGBUS); in that case the address should be passed through unmodified.
+
+      However, the documentation in the manpage does not reflect the reality
+      found in the Solaris kernel - uts/<arch>/os/trap.c. There one can
+      observe that in some cases si_addr is set to an address provided by the
+      underlying subsystem, and in other cases to the current program counter.
+      Some signals are not documented at all. It is almost impossible to
+      determine what value is stored in si_addr based solely on the
+      information the kernel provides to the signal handler.
+
+      POSIX.1-2008 says about si_addr:
+      SIGILL, SIGFPE ... Address of faulting instruction.
+      SIGSEGV, SIGBUS ... Address of faulting memory reference.
+      For some implementations, the value of si_addr may be inaccurate.
+
+      See tests none/tests/faultstatus and none/tests/x86/badseg for examples.
+      The code below simply follows the POSIX standard, but propagates any
+      possibly incorrect values from the kernel to the user.
+    */
+   switch (signo) {
+   case VKI_SIGSEGV:
+      switch (siginfo->si_code) {
+      case VKI_SEGV_ACCERR:
+      case VKI_SEGV_MAPERR:
+      default:
+         break;
+      case VKI_SEGV_MADE_UP_GPF:
+         /* Translate si_code synthesized by Valgrind to SEGV_MAPERR. */
+         frame->siginfo.si_code = VKI_SEGV_MAPERR;
+         break;
+      }
+      break;
+   case VKI_SIGBUS:
+      break;
+   case VKI_SIGFPE:
+   case VKI_SIGILL:
+   case VKI_SIGTRAP:
+      frame->siginfo.si_addr = (void*)VG_(get_IP)(tid);
+      break;
+   case VKI_SIGPROF:
+      frame->siginfo.si_faddr = (void*)VG_(get_IP)(tid);
+      break;
+   default:
+      break;
+   }
+   VG_TRACK(post_mem_write, Vg_CoreSignal, tid, (Addr)&frame->siginfo,
+            sizeof(frame->siginfo));
+
+   /* Save the signal number in an unused slot.  Later, when a return from the
+      signal is made, this value is used to inform the tool that the
+      processing for the given signal has ended. */
+   VKI_UC_SIGNO(&frame->ucontext) = signo | ((~(UWord)signo & 0xFFFF) << 16);
+   /* Old context has to point to the saved ucontext. */
+   tst->os_state.oldcontext = &frame->ucontext;
+   /* Save ERR and TRAPNO if siguc is present. */
+   if (siguc) {
+      frame->ucontext.uc_mcontext.gregs[VKI_REG_ERR]
+         = siguc->uc_mcontext.gregs[VKI_REG_ERR];
+      VG_TRACK(post_mem_write, Vg_CoreSignal, tid,
+               (Addr)&frame->ucontext.uc_mcontext.gregs[VKI_REG_ERR],
+               sizeof(UWord));
+      frame->ucontext.uc_mcontext.gregs[VKI_REG_TRAPNO]
+         = siguc->uc_mcontext.gregs[VKI_REG_TRAPNO];
+      VG_TRACK(post_mem_write, Vg_CoreSignal, tid,
+               (Addr)&frame->ucontext.uc_mcontext.gregs[VKI_REG_TRAPNO],
+               sizeof(UWord));
+   }
+
+   /* Prepare parameters for a signal handler. */
+   frame->a1_signo = signo;
+   /* The first parameter has to be 16-byte aligned, resembling function
+      calls. */
+   {
+      /* Using
+         vg_assert(VG_IS_16_ALIGNED(&frame->a1_signo));
+         seems to get miscompiled on amd64 with GCC 4.7.2. */
+      Addr signo_addr = (Addr)&frame->a1_signo;
+      vg_assert(VG_IS_16_ALIGNED(signo_addr));
+   }
+   frame->a2_siginfo = &frame->siginfo;
+   VG_TRACK(post_mem_write, Vg_CoreSignal, tid, (Addr)&frame->a1_signo,
+            sizeof(frame->a1_signo) + sizeof(frame->a2_siginfo));
+#if defined(VGP_x86_solaris)
+   frame->a3_ucontext = &frame->ucontext;
+   VG_TRACK(post_mem_write, Vg_CoreSignal, tid, (Addr)&frame->a3_ucontext,
+            sizeof(frame->a3_ucontext));
+#elif defined(VGP_amd64_solaris)
+   tst->arch.vex.guest_RDI = signo;
+   VG_TRACK(post_reg_write, Vg_CoreSignal, tid, offsetof(VexGuestAMD64State,
+            guest_RDI), sizeof(ULong));
+   tst->arch.vex.guest_RSI = (Addr)&frame->siginfo;
+   VG_TRACK(post_reg_write, Vg_CoreSignal, tid, offsetof(VexGuestAMD64State,
+            guest_RSI), sizeof(ULong));
+   tst->arch.vex.guest_RDX = (Addr)&frame->ucontext;
+   VG_TRACK(post_reg_write, Vg_CoreSignal, tid, offsetof(VexGuestAMD64State,
+            guest_RDX), sizeof(ULong));
+#endif
+
+   /* Set up the stack pointer. */
+   vg_assert(esp == (Addr)&frame->return_addr);
+   VG_(set_SP)(tid, esp);
+   VG_TRACK(post_reg_write, Vg_CoreSignal, tid, VG_O_STACK_PTR, sizeof(Addr));
+
+   /* Set up the program counter. Note that we don't inform a tool about IP
+      write because IP is always defined. */
+   VG_(set_IP)(tid, (Addr)handler);
+
+   /* If the signal is delivered on the alternate stack, copy it out to
+      ustack.  This has to be done after setting a new IP so the SS_ONSTACK
+      flag is set by VG_(do_sys_sigaltstack)(). */
+   if (on_altstack && tst->os_state.ustack
+       && VG_(am_is_valid_for_client)((Addr)tst->os_state.ustack,
+                                      sizeof(*tst->os_state.ustack),
+                                      VKI_PROT_WRITE)) {
+      SysRes res;
+      vki_stack_t altstack;
+
+      /* Get information about alternate stack. */
+      res = VG_(do_sys_sigaltstack)(tid, NULL, &altstack);
+      vg_assert(!sr_isError(res));
+
+      /* Copy it to ustack. */
+      *tst->os_state.ustack = altstack;
+      VG_TRACK(post_mem_write, Vg_CoreSignal, tid, (Addr)tst->os_state.ustack,
+               sizeof(*tst->os_state.ustack));
+   }
+
+   if (VG_(clo_trace_signals))
+      VG_(message)(Vg_DebugMsg,
+                   "sigframe_create (thread %d): next IP=%#lx, "
+                   "next SP=%#lx\n",
+                   tid, (Addr)handler, (Addr)frame);
+}
+
+void VG_(sigframe_destroy)(ThreadId tid, Bool isRT)
+{
+   /* Not used on Solaris. */
+   vg_assert(0);
+}
+
+void VG_(sigframe_return)(ThreadId tid, const vki_ucontext_t *uc)
+{
+   Int signo;
+
+   /* Check if a signal number was saved in the restored context. */
+   signo = VKI_UC_SIGNO_CONST(uc) & 0xFFFF;
+   if (!signo || signo != ((~VKI_UC_SIGNO_CONST(uc) >> 16) & 0xFFFF))
+      return;
+
+   /* Note: The active tool should be informed here about the dead stack area.
+      However, this was already done when the original context was restored (in
+      VG_(restore_context)()) so it is not necessary to do it here again.
+
+      There is a small nuance, though: VG_(restore_context)() triggers the
+      die_mem_stack event while in this case it should really trigger the
+      die_mem_stack_signal event.  This is not currently a problem because all
+      official tools handle these two events in the same way.
+
+      If a return from an alternate stack is made then no die_mem_stack event
+      is currently triggered. */
+
+   /* Returning from a signal handler. */
+   if (VG_(clo_trace_signals))
+      VG_(message)(Vg_DebugMsg,
+                   "sigframe_return (thread %d): IP=%#lx\n",
+                   tid, VG_(get_IP)(tid));
+
+   /* Tell the tool. */
+   VG_TRACK(post_deliver_signal, tid, signo);
+}
+
+#endif // defined(VGP_x86_solaris) || defined(VGP_amd64_solaris)
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/
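Aside: the signal number stashed in the saved ucontext above is stored together
with the bitwise complement of its low 16 bits, so that sigframe_return() can
distinguish a context produced by sigframe_create() from an arbitrary context
passed to setcontext().  A minimal standalone sketch of just that round trip
(the helper names below are chosen only for illustration):

#include <assert.h>
#include <stdio.h>

typedef unsigned long UWord;

/* Tag a signal number with the complement of its low 16 bits, as done when
   the frame is created. */
static UWord tag_signo(int signo)
{
   return (UWord)signo | ((~(UWord)signo & 0xFFFF) << 16);
}

/* Recover the signal number, returning 0 if the value does not carry a
   valid tag (i.e. the context did not come from a signal frame). */
static int untag_signo(UWord tagged)
{
   int signo = tagged & 0xFFFF;
   if (!signo || signo != ((~tagged >> 16) & 0xFFFF))
      return 0;
   return signo;
}

int main(void)
{
   UWord t = tag_signo(11);        /* e.g. SIGSEGV */
   assert(untag_signo(t) == 11);   /* round trip succeeds */
   assert(untag_signo(11) == 0);   /* an untagged value is rejected */
   printf("tagged value: %#lx\n", t);
   return 0;
}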
diff --git a/coregrind/m_sigframe/sigframe-x86-darwin.c b/coregrind/m_sigframe/sigframe-x86-darwin.c
index 540378a..8e77f60 100644
--- a/coregrind/m_sigframe/sigframe-x86-darwin.c
+++ b/coregrind/m_sigframe/sigframe-x86-darwin.c
@@ -133,6 +133,7 @@
    former case, the x86 calling conventions will simply cause the
    extra 2 args to be ignored (inside the handler). */
 void VG_(sigframe_create) ( ThreadId tid,
+                            Bool on_altstack,
                             Addr sp_top_of_frame,
                             const vki_siginfo_t *siginfo,
                             const struct vki_ucontext *siguc,
diff --git a/coregrind/m_sigframe/sigframe-x86-linux.c b/coregrind/m_sigframe/sigframe-x86-linux.c
index 3db2251..ce33f73 100644
--- a/coregrind/m_sigframe/sigframe-x86-linux.c
+++ b/coregrind/m_sigframe/sigframe-x86-linux.c
@@ -535,6 +535,7 @@
 
 /* EXPORTED */
 void VG_(sigframe_create)( ThreadId tid, 
+                           Bool on_altstack,
                            Addr esp_top_of_frame,
                            const vki_siginfo_t *siginfo,
                            const struct vki_ucontext *siguc,
diff --git a/coregrind/m_signals.c b/coregrind/m_signals.c
index 4aa24f5..e231f3d 100644
--- a/coregrind/m_signals.c
+++ b/coregrind/m_signals.c
@@ -566,6 +566,7 @@
         (srP)->misc.MIPS64.r31 = (uc)->uc_mcontext.sc_regs[31]; \
         (srP)->misc.MIPS64.r28 = (uc)->uc_mcontext.sc_regs[28]; \
       }
+
 #elif defined(VGP_tilegx_linux)
 #  define VG_UCONTEXT_INSTR_PTR(uc)       ((uc)->uc_mcontext.pc)
 #  define VG_UCONTEXT_STACK_PTR(uc)       ((uc)->uc_mcontext.sp)
@@ -580,6 +581,34 @@
         (srP)->misc.TILEGX.r52 = (uc)->uc_mcontext.gregs[52];  \
         (srP)->misc.TILEGX.r55 = (uc)->uc_mcontext.lr;         \
       }
+
+#elif defined(VGP_x86_solaris)
+#  define VG_UCONTEXT_INSTR_PTR(uc)       ((Addr)(uc)->uc_mcontext.gregs[VKI_EIP])
+#  define VG_UCONTEXT_STACK_PTR(uc)       ((Addr)(uc)->uc_mcontext.gregs[VKI_UESP])
+#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                               \
+      VG_(mk_SysRes_x86_solaris)((uc)->uc_mcontext.gregs[VKI_EFL] & 1, \
+                                 (uc)->uc_mcontext.gregs[VKI_EAX],     \
+                                 (uc)->uc_mcontext.gregs[VKI_EFL] & 1  \
+                                 ? 0 : (uc)->uc_mcontext.gregs[VKI_EDX])
+#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                      \
+      { (srP)->r_pc = (ULong)(uc)->uc_mcontext.gregs[VKI_EIP];         \
+        (srP)->r_sp = (ULong)(uc)->uc_mcontext.gregs[VKI_UESP];        \
+        (srP)->misc.X86.r_ebp = (uc)->uc_mcontext.gregs[VKI_EBP];      \
+      }
+
+#elif defined(VGP_amd64_solaris)
+#  define VG_UCONTEXT_INSTR_PTR(uc)       ((Addr)(uc)->uc_mcontext.gregs[VKI_REG_RIP])
+#  define VG_UCONTEXT_STACK_PTR(uc)       ((Addr)(uc)->uc_mcontext.gregs[VKI_REG_RSP])
+#  define VG_UCONTEXT_SYSCALL_SYSRES(uc)                                     \
+      VG_(mk_SysRes_amd64_solaris)((uc)->uc_mcontext.gregs[VKI_REG_RFL] & 1, \
+                                   (uc)->uc_mcontext.gregs[VKI_REG_RAX],     \
+                                   (uc)->uc_mcontext.gregs[VKI_REG_RFL] & 1  \
+                                   ? 0 : (uc)->uc_mcontext.gregs[VKI_REG_RDX])
+#  define VG_UCONTEXT_TO_UnwindStartRegs(srP, uc)                            \
+      { (srP)->r_pc = (uc)->uc_mcontext.gregs[VKI_REG_RIP];                  \
+        (srP)->r_sp = (uc)->uc_mcontext.gregs[VKI_REG_RSP];                  \
+        (srP)->misc.AMD64.r_rbp = (uc)->uc_mcontext.gregs[VKI_REG_RBP];      \
+      }
 #else
 #  error Unknown platform
 #endif
@@ -592,7 +621,7 @@
 #if defined(VGO_linux)
 #  define VKI_SIGINFO_si_addr  _sifields._sigfault._addr
 #  define VKI_SIGINFO_si_pid   _sifields._kill._pid
-#elif defined(VGO_darwin)
+#elif defined(VGO_darwin) || defined(VGO_solaris)
 #  define VKI_SIGINFO_si_addr  si_addr
 #  define VKI_SIGINFO_si_pid   si_pid
 #else
@@ -667,6 +696,7 @@
      SA_NOCLDSTOP -- passed to kernel
      SA_ONESHOT or SA_RESETHAND -- pass through
      SA_RESTART -- we observe this but set our handlers to always restart
+                   (this doesn't apply to the Solaris port)
      SA_NOMASK or SA_NODEFER -- we observe this, but our handlers block everything
      SA_ONSTACK -- pass through
      SA_NOCLDWAIT -- pass through
@@ -796,12 +826,20 @@
 
       /* SA_ONESHOT: ignore client setting */
       
+#     if !defined(VGO_solaris)
       /* SA_RESTART: ignore client setting and always set it for us.
 	 Though we never rely on the kernel to restart a
 	 syscall, we observe whether it wanted to restart the syscall
 	 or not, which is needed by 
          VG_(fixup_guest_state_after_syscall_interrupted) */
       skss_flags |= VKI_SA_RESTART;
+#     else
+      /* The above does not apply to the Solaris port, where the kernel does
+         not directly restart syscalls.  Instead it checks the SA_RESTART
+         flag and, if it is set, returns ERESTART to libc; the library then
+         actually restarts the syscall. */
+      skss_flags |= scss_flags & VKI_SA_RESTART;
+#     endif
 
       /* SA_NOMASK: ignore it */
 
@@ -965,6 +1003,15 @@
    " swint1\n" \
    ".previous\n"
 
+#elif defined(VGP_x86_solaris) || defined(VGP_amd64_solaris)
+/* Not used on Solaris. */
+#  define _MY_SIGRETURN(name) \
+   ".text\n" \
+   ".globl my_sigreturn\n" \
+   "my_sigreturn:\n" \
+   "ud2\n" \
+   ".previous\n"
+
 #else
 #  error Unknown platform
 #endif
@@ -1008,7 +1055,7 @@
       ksa.sa_flags    = skss.skss_per_sig[sig].skss_flags;
 #     if !defined(VGP_ppc32_linux) && \
          !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
-         !defined(VGP_mips32_linux)
+         !defined(VGP_mips32_linux) && !defined(VGO_solaris)
       ksa.sa_restorer = my_sigreturn;
 #     endif
       /* Re above ifdef (also the assertion below), PaulM says:
@@ -1038,11 +1085,24 @@
       if (!force_update) {
          vg_assert(ksa_old.ksa_handler 
                    == skss_old.skss_per_sig[sig].skss_handler);
+#        if defined(VGO_solaris)
+         if (ksa_old.ksa_handler == VKI_SIG_DFL
+               || ksa_old.ksa_handler == VKI_SIG_IGN) {
+            /* The Solaris kernel ignores signal flags (except SA_NOCLDWAIT
+               and SA_NOCLDSTOP) and the signal mask if the handler is set to
+               SIG_DFL or SIG_IGN. */
+            skss_old.skss_per_sig[sig].skss_flags
+               &= (VKI_SA_NOCLDWAIT | VKI_SA_NOCLDSTOP);
+            vg_assert(VG_(isemptysigset)( &ksa_old.sa_mask ));
+            VG_(sigfillset)( &ksa_old.sa_mask );
+         }
+#        endif
          vg_assert(ksa_old.sa_flags 
                    == skss_old.skss_per_sig[sig].skss_flags);
 #        if !defined(VGP_ppc32_linux) && \
             !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
-            !defined(VGP_mips32_linux) && !defined(VGP_mips64_linux)
+            !defined(VGP_mips32_linux) && !defined(VGP_mips64_linux) && \
+            !defined(VGO_solaris)
          vg_assert(ksa_old.sa_restorer == my_sigreturn);
 #        endif
          VG_(sigaddset)( &ksa_old.sa_mask, VKI_SIGKILL );
@@ -1162,7 +1222,8 @@
       old_act->ksa_handler = scss.scss_per_sig[signo].scss_handler;
       old_act->sa_flags    = scss.scss_per_sig[signo].scss_flags;
       old_act->sa_mask     = scss.scss_per_sig[signo].scss_mask;
-#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
+#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+         !defined(VGO_solaris)
       old_act->sa_restorer = scss.scss_per_sig[signo].scss_restorer;
 #     endif
    }
@@ -1174,7 +1235,8 @@
       scss.scss_per_sig[signo].scss_mask     = new_act->sa_mask;
 
       scss.scss_per_sig[signo].scss_restorer = NULL;
-#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
+#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+         !defined(VGO_solaris)
       scss.scss_per_sig[signo].scss_restorer = new_act->sa_restorer;
 #     endif
 
@@ -1369,6 +1431,7 @@
 void push_signal_frame ( ThreadId tid, const vki_siginfo_t *siginfo,
                                        const struct vki_ucontext *uc )
 {
+   Bool         on_altstack;
    Addr         esp_top_of_frame;
    ThreadState* tst;
    Int		sigNo = siginfo->si_signo;
@@ -1389,6 +1452,7 @@
              arch/i386/kernel/signal.c. */
           sas_ss_flags(tid, VG_(get_SP)(tid)) == 0
       ) {
+      on_altstack = True;
       esp_top_of_frame 
          = (Addr)(tst->altstack.ss_sp) + tst->altstack.ss_size;
       if (VG_(clo_trace_signals))
@@ -1397,24 +1461,21 @@
                    sigNo, VG_(signame)(sigNo), tid, tst->altstack.ss_sp,
                    (UChar *)tst->altstack.ss_sp + tst->altstack.ss_size,
                    (Word)tst->altstack.ss_size );
-
-      /* Signal delivery to tools */
-      VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/True );
-      
    } else {
+      on_altstack = False;
       esp_top_of_frame = VG_(get_SP)(tid) - VG_STACK_REDZONE_SZB;
-
-      /* Signal delivery to tools */
-      VG_TRACK( pre_deliver_signal, tid, sigNo, /*alt_stack*/False );
    }
 
+   /* Signal delivery to tools */
+   VG_TRACK( pre_deliver_signal, tid, sigNo, on_altstack );
+
    vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_IGN);
    vg_assert(scss.scss_per_sig[sigNo].scss_handler != VKI_SIG_DFL);
 
    /* This may fail if the client stack is busted; if that happens,
       the whole process will exit rather than simply calling the
       signal handler. */
-   VG_(sigframe_create) (tid, esp_top_of_frame, siginfo, uc,
+   VG_(sigframe_create) (tid, on_altstack, esp_top_of_frame, siginfo, uc,
                          scss.scss_per_sig[sigNo].scss_handler,
                          scss.scss_per_sig[sigNo].scss_flags,
                          &tst->sig_mask,
@@ -1439,6 +1500,7 @@
       case VKI_SIGUSR1:   return "SIGUSR1";
       case VKI_SIGUSR2:   return "SIGUSR2";
       case VKI_SIGSEGV:   return "SIGSEGV";
+      case VKI_SIGSYS:    return "SIGSYS";
       case VKI_SIGPIPE:   return "SIGPIPE";
       case VKI_SIGALRM:   return "SIGALRM";
       case VKI_SIGTERM:   return "SIGTERM";
@@ -1461,10 +1523,42 @@
 #     if defined(VKI_SIGPWR)
       case VKI_SIGPWR:    return "SIGPWR";
 #     endif
-#     if defined(VKI_SIGUNUSED)
+#     if defined(VKI_SIGUNUSED) && (VKI_SIGUNUSED != VKI_SIGSYS)
       case VKI_SIGUNUSED: return "SIGUNUSED";
 #     endif
 
+      /* Solaris-specific signals. */
+#     if defined(VKI_SIGEMT)
+      case VKI_SIGEMT:    return "SIGEMT";
+#     endif
+#     if defined(VKI_SIGWAITING)
+      case VKI_SIGWAITING: return "SIGWAITING";
+#     endif
+#     if defined(VKI_SIGLWP)
+      case VKI_SIGLWP:    return "SIGLWP";
+#     endif
+#     if defined(VKI_SIGFREEZE)
+      case VKI_SIGFREEZE: return "SIGFREEZE";
+#     endif
+#     if defined(VKI_SIGTHAW)
+      case VKI_SIGTHAW:   return "SIGTHAW";
+#     endif
+#     if defined(VKI_SIGCANCEL)
+      case VKI_SIGCANCEL: return "SIGCANCEL";
+#     endif
+#     if defined(VKI_SIGLOST)
+      case VKI_SIGLOST:   return "SIGLOST";
+#     endif
+#     if defined(VKI_SIGXRES)
+      case VKI_SIGXRES:   return "SIGXRES";
+#     endif
+#     if defined(VKI_SIGJVM1)
+      case VKI_SIGJVM1:   return "SIGJVM1";
+#     endif
+#     if defined(VKI_SIGJVM2)
+      case VKI_SIGJVM2:   return "SIGJVM2";
+#     endif
+
 #  if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
    case VKI_SIGRTMIN ... VKI_SIGRTMAX:
       VG_(sprintf)(buf, "SIGRT%d", sigNo-VKI_SIGRTMIN);
@@ -1487,7 +1581,8 @@
 
    sa.ksa_handler = VKI_SIG_DFL;
    sa.sa_flags = 0;
-#  if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
+#  if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+      !defined(VGO_solaris)
    sa.sa_restorer = 0;
 #  endif
    VG_(sigemptyset)(&sa.sa_mask);
@@ -1499,7 +1594,7 @@
    VG_(sigprocmask)(VKI_SIG_UNBLOCK, &mask, &origmask);
 
    r = VG_(kill)(VG_(getpid)(), sigNo);
-#  if defined(VGO_linux)
+#  if !defined(VGO_darwin)
    /* This sometimes fails with EPERM on Darwin.  I don't know why. */
    vg_assert(r == 0);
 #  endif
@@ -1517,7 +1612,7 @@
 // pass in some other details that can help when si_code is unreliable.
 static Bool is_signal_from_kernel(ThreadId tid, int signum, int si_code)
 {
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    // On Linux, SI_USER is zero, negative values are from the user, positive
    // values are from the kernel.  There are SI_FROMUSER and SI_FROMKERNEL
    // macros but we don't use them here because other platforms don't have
@@ -1558,11 +1653,6 @@
 #  endif
 }
 
-// This is an arbitrary si_code that we only use internally.  It corresponds
-// to the value SI_KERNEL on Linux, but that's not really of any significance
-// as far as I can determine.
-#define VKI_SEGV_MADE_UP_GPF    0x80
-
 /* 
    Perform the default action of a signal.  If the signal is fatal, it
    marks all threads as needing to exit, but it doesn't actually kill
@@ -1589,8 +1679,15 @@
       case VKI_SIGSEGV:	/* core */
       case VKI_SIGBUS:	/* core */
       case VKI_SIGTRAP:	/* core */
+      case VKI_SIGSYS:	/* core */
       case VKI_SIGXCPU:	/* core */
       case VKI_SIGXFSZ:	/* core */
+
+      /* Solaris-specific signals. */
+#     if defined(VKI_SIGEMT)
+      case VKI_SIGEMT:	/* core */
+#     endif
+
          terminate = True;
          core = True;
          break;
@@ -1607,12 +1704,17 @@
 #     if defined(VKI_SIGPWR)
       case VKI_SIGPWR:	/* term */
 #     endif
-      case VKI_SIGSYS:	/* term */
       case VKI_SIGPROF:	/* term */
       case VKI_SIGVTALRM:	/* term */
 #     if defined(VKI_SIGRTMIN) && defined(VKI_SIGRTMAX)
       case VKI_SIGRTMIN ... VKI_SIGRTMAX: /* term */
 #     endif
+
+      /* Solaris-specific signals. */
+#     if defined(VKI_SIGLOST)
+      case VKI_SIGLOST:	/* term */
+#     endif
+
          terminate = True;
          break;
    }
@@ -1702,6 +1804,11 @@
 	    case VKI_FPE_FLTRES: event = "FP inexact"; break;
 	    case VKI_FPE_FLTINV: event = "FP invalid operation"; break;
 	    case VKI_FPE_FLTSUB: event = "FP subscript out of range"; break;
+
+            /* Solaris-specific codes. */
+#           if defined(VKI_FPE_FLTDEN)
+	    case VKI_FPE_FLTDEN: event = "FP denormalize"; break;
+#           endif
 	    }
 	    break;
 
@@ -1727,7 +1834,7 @@
          likely cause a segfault. */
       if (VG_(is_valid_tid)(tid)) {
          Word first_ip_delta = 0;
-#if defined(VGO_linux)
+#if defined(VGO_linux) || defined(VGO_solaris)
          /* Make sure that the address stored in the stack pointer is 
             located in a mapped page. That is not necessarily so. E.g.
             consider the scenario where the stack pointer was decreased
@@ -2027,6 +2134,9 @@
    uc.uc_mcontext = &mc;
    uc.uc_mcontext->__es.__trapno = 3;
    uc.uc_mcontext->__es.__err = 0;
+#  elif defined(VGP_x86_solaris)
+   uc.uc_mcontext.gregs[VKI_ERR] = 0;
+   uc.uc_mcontext.gregs[VKI_TRAPNO] = VKI_T_BPTFLT;
 #  endif
 
    /* fixs390: do we need to do anything here for s390 ? */
@@ -2162,13 +2272,117 @@
       mask them off) sign extends them when exporting to user space so
       we do the same thing here. */
    return (Short)si_code;
-#elif defined(VGO_darwin)
+#elif defined(VGO_darwin) || defined(VGO_solaris)
    return si_code;
 #else
 #  error Unknown OS
 #endif
 }
 
+#if defined(VGO_solaris)
+/* The following function switches Valgrind from the client stack back onto
+   a Valgrind stack.  It is used only when a door_return call was invoked by
+   the client, because this is the only syscall that is executed directly on
+   the client stack (see syscall-{x86,amd64}-solaris.S).  The switch onto the
+   Valgrind stack has to be made as soon as possible because there is no
+   guarantee that there is enough space on the client stack to run the
+   complete signal machinery.  Also, Valgrind has to be switched back onto its
+   own stack before a simulated signal frame is created, because creating the
+   frame would overwrite the real sigframe built by the kernel. */
+static void async_signalhandler_solaris_preprocess(ThreadId tid, Int *signo,
+                                                   vki_siginfo_t *info,
+                                                   struct vki_ucontext *uc)
+{
+#  define RECURSION_BIT 0x1000
+   Addr sp;
+   vki_sigframe_t *frame;
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   Int rec_signo;
+
+   /* If not doing door_return then return immediately. */
+   if (!tst->os_state.in_door_return)
+      return;
+
+   /* Check for recursion:
+      v ...
+      | async_signalhandler - executed on the client stack
+      v async_signalhandler_solaris_preprocess - first call switches the
+      |   stacks and sets the RECURSION_BIT flag
+      v async_signalhandler - executed on the Valgrind stack
+      | async_signalhandler_solaris_preprocess - the RECURSION_BIT flag is
+      v   set, clear it and return
+    */
+   if (*signo & RECURSION_BIT) {
+      *signo &= ~RECURSION_BIT;
+      return;
+   }
+
+   rec_signo = *signo | RECURSION_BIT;
+
+#  if defined(VGP_x86_solaris)
+   /* Register %ebx/%rbx points to the top of the original V stack. */
+   sp = uc->uc_mcontext.gregs[VKI_EBX];
+#  elif defined(VGP_amd64_solaris)
+   sp = uc->uc_mcontext.gregs[VKI_REG_RBX];
+#  else
+#    error "Unknown platform"
+#  endif
+
+   /* Build a fake signal frame, similar to the one in sigframe-solaris.c. */
+   /* Calculate a new stack pointer. */
+   sp -= sizeof(vki_sigframe_t);
+   sp = VG_ROUNDDN(sp, 16) - sizeof(UWord);
+
+   /* Fill in the frame. */
+   frame = (vki_sigframe_t*)sp;
+   /* Set a bogus return address. */
+   frame->return_addr = (void*)~0UL;
+   frame->a1_signo = rec_signo;
+   /* The first parameter has to be 16-byte aligned, resembling a function
+      call. */
+   {
+      /* Using
+         vg_assert(VG_IS_16_ALIGNED(&frame->a1_signo));
+         seems to get miscompiled on amd64 with GCC 4.7.2. */
+      Addr signo_addr = (Addr)&frame->a1_signo;
+      vg_assert(VG_IS_16_ALIGNED(signo_addr));
+   }
+   frame->a2_siginfo = &frame->siginfo;
+   frame->siginfo = *info;
+   frame->ucontext = *uc;
+
+#  if defined(VGP_x86_solaris)
+   frame->a3_ucontext = &frame->ucontext;
+
+   /* Switch onto the V stack and restart the signal processing. */
+   __asm__ __volatile__(
+      "xorl %%ebp, %%ebp\n"
+      "movl %[sp], %%esp\n"
+      "jmp async_signalhandler\n"
+      :
+      : [sp] "a" (sp)
+      : /*"ebp"*/);
+
+#  elif defined(VGP_amd64_solaris)
+   __asm__ __volatile__(
+      "xorq %%rbp, %%rbp\n"
+      "movq %[sp], %%rsp\n"
+      "jmp async_signalhandler\n"
+      :
+      : [sp] "a" (sp), "D" (rec_signo), "S" (&frame->siginfo),
+        "d" (&frame->ucontext)
+      : /*"rbp"*/);
+#  else
+#    error "Unknown platform"
+#  endif
+
+   /* We should never get here. */
+   vg_assert(0);
+
+#  undef RECURSION_BIT
+}
+#endif
+
 /* 
    Receive an async signal from the kernel.
 
@@ -2183,8 +2397,13 @@
    ThreadState* tst = VG_(get_ThreadState)(tid);
    SysRes       sres;
 
-   /* The thread isn't currently running, make it so before going on */
    vg_assert(tst->status == VgTs_WaitSys);
+
+#  if defined(VGO_solaris)
+   async_signalhandler_solaris_preprocess(tid, &sigNo, info, uc);
+#  endif
+
+   /* The thread isn't currently running, make it so before going on */
    VG_(acquire_BigLock)(tid, "async_signalhandler");
 
    info->si_code = sanitize_si_code(info->si_code);
@@ -2234,7 +2453,8 @@
       tid, 
       VG_UCONTEXT_INSTR_PTR(uc), 
       sres,  
-      !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART)
+      !!(scss.scss_per_sig[sigNo].scss_flags & VKI_SA_RESTART),
+      uc
    );
 
    /* (2) */
@@ -2345,7 +2565,13 @@
       of a faulting instruction), then how we treat it depends on when it
       arrives... */
 
-   if (VG_(threads)[tid].status == VgTs_WaitSys) {
+   if (VG_(threads)[tid].status == VgTs_WaitSys
+#     if defined(VGO_solaris)
+      /* Check if the signal was really received while doing a blocking
+         syscall.  Only then can the async_signalhandler() path be used. */
+       && VG_(is_ip_in_blocking_syscall)(tid, VG_UCONTEXT_INSTR_PTR(uc))
+#     endif
+         ) {
       /* Signal arrived while we're blocked in a syscall.  This means that
          the client's signal mask was applied.  In other words, so we can't
          get here unless the client wants this signal right now.  This means
@@ -2605,6 +2831,11 @@
    } else {
       sync_signalhandler_from_kernel(tid, sigNo, info, uc);
    }
+
+#  if defined(VGO_solaris)
+   /* On Solaris we have to return from the signal handler manually. */
+   VG_(do_syscall2)(__NR_context, VKI_SETCONTEXT, (UWord)uc);
+#  endif
 }
 
 
@@ -2653,7 +2884,8 @@
    VG_(printf)("pp_ksigaction: handler %p, flags 0x%x, restorer %p\n", 
                sa->ksa_handler, 
                (UInt)sa->sa_flags, 
-#              if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
+#              if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+                  !defined(VGO_solaris)
                   sa->sa_restorer
 #              else
                   (void*)0
@@ -2675,7 +2907,8 @@
 
    sa.ksa_handler = VKI_SIG_DFL;
    sa.sa_flags = 0;
-#  if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
+#  if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+      !defined(VGO_solaris)
    sa.sa_restorer = 0;
 #  endif
    VG_(sigemptyset)(&sa.sa_mask);
@@ -2777,7 +3010,8 @@
 
 	 tsa.ksa_handler = (void *)sync_signalhandler;
 	 tsa.sa_flags = VKI_SA_SIGINFO;
-#        if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
+#        if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+            !defined(VGO_solaris)
 	 tsa.sa_restorer = 0;
 #        endif
 	 VG_(sigfillset)(&tsa.sa_mask);
@@ -2804,7 +3038,8 @@
       scss.scss_per_sig[i].scss_mask     = sa.sa_mask;
 
       scss.scss_per_sig[i].scss_restorer = NULL;
-#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin)
+#     if !defined(VGP_x86_darwin) && !defined(VGP_amd64_darwin) && \
+         !defined(VGO_solaris)
       scss.scss_per_sig[i].scss_restorer = sa.sa_restorer;
 #     endif
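Aside: the stack-switch logic added above re-enters async_signalhandler() with
the signal number tagged by RECURSION_BIT, so that the second pass through the
preprocessing function can tell that the stacks have already been switched.  A
minimal sketch of just that tagging protocol (the real code restarts the
handler with inline assembly; here the restart is modelled as a plain
recursive call):

#include <assert.h>
#include <stdio.h>

#define RECURSION_BIT 0x1000

/* Models the two passes through the preprocessing step: the first pass tags
   the signal number and "restarts" the handler, the second pass detects the
   tag, strips it and returns the original number so handling can continue. */
static int preprocess(int signo)
{
   if (signo & RECURSION_BIT)
      return signo & ~RECURSION_BIT;          /* second pass: continue */
   return preprocess(signo | RECURSION_BIT);  /* first pass: restart */
}

int main(void)
{
   int signo = 14;                 /* e.g. SIGALRM */
   assert(preprocess(signo) == signo);
   printf("signal %d handled after one restart\n", signo);
   return 0;
}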
 
diff --git a/coregrind/m_stacktrace.c b/coregrind/m_stacktrace.c
index aca2d20..005765a 100644
--- a/coregrind/m_stacktrace.c
+++ b/coregrind/m_stacktrace.c
@@ -92,7 +92,8 @@
    
 /* ------------------------ x86 ------------------------- */
 
-#if defined(VGP_x86_linux) || defined(VGP_x86_darwin)
+#if defined(VGP_x86_linux) || defined(VGP_x86_darwin) \
+    || defined(VGP_x86_solaris)
 
 #define N_FP_CF_VERIF 1021
 // prime number so that size of fp_CF_verif is just below 4K or 8K
@@ -234,10 +235,18 @@
    /* vg_assert(fp_min <= fp_max);*/
    // On Darwin, this kicks in for pthread-related stack traces, so they're
    // only 1 entry long which is wrong.
-#  if !defined(VGO_darwin)
+#  if defined(VGO_linux)
    if (fp_min + 512 >= fp_max) {
       /* If the stack limits look bogus, don't poke around ... but
          don't bomb out either. */
+#  elif defined(VGO_solaris)
+   if (fp_max == 0) {
+      /* VG_(get_StackTrace)() can be called by tools very early when
+         various tracing options are enabled. Don't proceed further
+         if the stack limits look bogus.
+       */
+#  endif
+#  if defined(VGO_linux) || defined(VGO_solaris)
       if (sps) sps[0] = uregs.xsp;
       if (fps) fps[0] = uregs.xbp;
       ips[0] = uregs.xip;
@@ -473,7 +482,8 @@
 
 /* ----------------------- amd64 ------------------------ */
 
-#if defined(VGP_amd64_linux) || defined(VGP_amd64_darwin)
+#if defined(VGP_amd64_linux) || defined(VGP_amd64_darwin) \
+    || defined(VGP_amd64_solaris)
 
 UInt VG_(get_StackTrace_wrk) ( ThreadId tid_if_known,
                                /*OUT*/Addr* ips, UInt max_n_ips,
@@ -518,10 +528,19 @@
    /* vg_assert(fp_min <= fp_max);*/
    // On Darwin, this kicks in for pthread-related stack traces, so they're
    // only 1 entry long which is wrong.
-#  if !defined(VGO_darwin)
+#  if defined(VGO_linux)
    if (fp_min + 256 >= fp_max) {
       /* If the stack limits look bogus, don't poke around ... but
          don't bomb out either. */
+#  elif defined(VGO_solaris)
+   if (fp_max == 0) {
+      /* VG_(get_StackTrace)() can be called by tools very early when
+         various tracing options are enabled. Don't proceed further
+         if the stack limits look bogus.
+       */
+#  endif
+#  if defined(VGO_linux) || defined(VGO_solaris)
+
       if (sps) sps[0] = uregs.xsp;
       if (fps) fps[0] = uregs.xbp;
       ips[0] = uregs.xip;
diff --git a/coregrind/m_syscall.c b/coregrind/m_syscall.c
index 3c71623..8cc0564 100644
--- a/coregrind/m_syscall.c
+++ b/coregrind/m_syscall.c
@@ -297,6 +297,51 @@
 }
 
 
+#elif defined(VGO_solaris)
+
+/* Generic constructors. */
+SysRes VG_(mk_SysRes_Error) ( UWord err ) {
+   SysRes r;
+   r._val     = err;
+   r._val2    = 0;
+   r._isError = True;
+   return r;
+}
+
+SysRes VG_(mk_SysRes_Success) ( UWord res ) {
+   SysRes r;
+   r._val     = res;
+   r._val2    = 0;
+   r._isError = False;
+   return r;
+}
+
+SysRes VG_(mk_SysRes_x86_solaris) ( Bool isErr, UInt val, UInt val2 )
+{
+   SysRes res;
+
+   // stay sane
+   vg_assert(isErr == True || isErr == False);
+
+   res._val  = val;
+   res._val2 = val2;
+   res._isError = isErr;
+   return res;
+}
+
+SysRes VG_(mk_SysRes_amd64_solaris) ( Bool isErr, ULong val, ULong val2 )
+{
+   SysRes res;
+
+   // stay sane
+   vg_assert(isErr == True || isErr == False);
+
+   res._val  = val;
+   res._val2 = val2;
+   res._isError = isErr;
+   return res;
+}
+
 #else
 #  error "Unknown OS"
 #endif
@@ -804,6 +849,93 @@
     ".previous\n"
     );
 
+#elif defined(VGP_x86_solaris)
+
+extern ULong
+do_syscall_WRK(UWord a1, UWord a2, UWord a3,    /* 4(esp)..12(esp) */
+               UWord a4, UWord a5, UWord a6,    /* 16(esp)..24(esp) */
+               UWord a7, UWord a8,              /* 28(esp)..32(esp) */
+               UWord syscall_no,                /* 36(esp) */
+               /*OUT*/UInt *errflag);           /* 40(esp) */
+/* Classic Unix syscall: parameters on the stack, an unused (by the kernel)
+   return address at 0(esp), the sysno in eax, the result in edx:eax, and the
+   carry flag set on error. */
+__asm__ (
+".text\n"
+".globl do_syscall_WRK\n"
+"do_syscall_WRK:\n"
+"       movl    40(%esp), %ecx\n"       /* assume syscall success */
+"       movl    $0, (%ecx)\n"
+"       movl    36(%esp), %eax\n"
+"       int     $0x91\n"
+"       jnc     1f\n"                   /* jump if success */
+"       movl    40(%esp), %ecx\n"       /* syscall failed - set *errflag */
+"       movl    $1, (%ecx)\n"
+"1:     ret\n"
+".previous\n"
+);
+
+extern ULong
+do_syscall_fast_WRK(UWord syscall_no);          /* 4(esp) */
+/* Fasttrap syscall: no parameters, the sysno in eax, the result in edx:eax;
+   never fails (if the sysno is valid). */
+__asm__ (
+".text\n"
+".globl do_syscall_fast_WRK\n"
+"do_syscall_fast_WRK:\n"
+"       movl    4(%esp), %eax\n"
+"       int     $0xD2\n"
+"       ret\n"
+".previous\n"
+);
+
+#elif defined(VGP_amd64_solaris)
+
+extern ULong
+do_syscall_WRK(UWord a1, UWord a2, UWord a3,    /* rdi, rsi, rdx */
+               UWord a4, UWord a5, UWord a6,    /* rcx, r8, r9 */
+               UWord a7, UWord a8,              /* 8(rsp), 16(rsp) */
+               UWord syscall_no,                /* 24(rsp) */
+               /*OUT*/ULong *errflag,           /* 32(rsp) */
+               /*OUT*/ULong *res2);             /* 40(rsp) */
+/* The first 6 parameters go in registers rdi, rsi, rdx, r10, r8, r9 and the
+   next 2 parameters on the stack, with an unused (by the kernel) return
+   address at 0(rsp), the sysno in rax, the result in rdx:rax, and the carry
+   flag set on error. */
+__asm__ (
+".text\n"
+".globl do_syscall_WRK\n"
+"do_syscall_WRK:\n"
+"       movq    %rcx, %r10\n"           /* pass rcx in r10 instead */
+"       movq    32(%rsp), %rcx\n"       /* assume syscall success */
+"       movq    $0, (%rcx)\n"
+"       movq    24(%rsp), %rax\n"
+"       syscall\n"
+"       jnc     1f\n"                   /* jump if success */
+"       movq    32(%rsp), %rcx\n"       /* syscall failed - set *errflag */
+"       movq    $1, (%rcx)\n"
+"1:     movq    40(%rsp), %rcx\n"       /* save 2nd result word */
+"       movq    %rdx, (%rcx)\n"
+"       ret\n"
+".previous\n"
+);
+
+extern ULong
+do_syscall_fast_WRK(UWord syscall_no,           /* rdi */
+                    /*OUT*/ULong *res2);        /* rsi */
+/* Fasttrap syscall: no parameters, the sysno in rax, the result in rdx:rax;
+   never fails (if the sysno is valid). */
+__asm__ (
+".text\n"
+".globl do_syscall_fast_WRK\n"
+"do_syscall_fast_WRK:\n"
+"       movq    %rdi, %rax\n"
+"       int     $0xD2\n"
+"       movq    %rdx, (%rsi)\n"         /* save 2nd result word */
+"       ret\n"
+".previous\n"
+);
+
 #else
 #  error Unknown platform
 #endif
@@ -937,6 +1069,62 @@
 
    return VG_(mk_SysRes_tilegx_linux)( val );
 
+#  elif defined(VGP_x86_solaris)
+   UInt val, val2, err = False;
+   Bool restart;
+   ULong u64;
+   UChar ssclass = VG_SOLARIS_SYSNO_CLASS(sysno);
+
+   switch (ssclass) {
+      case VG_SOLARIS_SYSCALL_CLASS_CLASSIC:
+         /* The Solaris kernel does not restart syscalls automatically so it
+            is done here. */
+         do {
+            u64 = do_syscall_WRK(a1,a2,a3,a4,a5,a6,a7,a8,
+                                 VG_SOLARIS_SYSNO_INDEX(sysno), &err);
+            val = (UInt)u64;
+            restart = err && (val == VKI_EINTR || val == VKI_ERESTART);
+         } while (restart);
+         break;
+      case VG_SOLARIS_SYSCALL_CLASS_FASTTRAP:
+         u64 = do_syscall_fast_WRK(VG_SOLARIS_SYSNO_INDEX(sysno));
+         break;
+      default:
+         vg_assert(0);
+         break;
+   }
+
+   val = (UInt)u64;
+   val2 = (UInt)(u64 >> 32);
+   return VG_(mk_SysRes_x86_solaris)(err ? True : False, val,
+                                     err ? 0 : val2);
+
+#  elif defined(VGP_amd64_solaris)
+   ULong val, val2, err = False;
+   Bool restart;
+   UChar ssclass = VG_SOLARIS_SYSNO_CLASS(sysno);
+
+   switch (ssclass) {
+      case VG_SOLARIS_SYSCALL_CLASS_CLASSIC:
+         /* The Solaris kernel does not restart syscalls automatically so it
+            is done here. */
+         do {
+            val = do_syscall_WRK(a1,a2,a3,a4,a5,a6,a7,a8,
+                                 VG_SOLARIS_SYSNO_INDEX(sysno), &err, &val2);
+            restart = err && (val == VKI_EINTR || val == VKI_ERESTART);
+         } while (restart);
+         break;
+      case VG_SOLARIS_SYSCALL_CLASS_FASTTRAP:
+         val = do_syscall_fast_WRK(VG_SOLARIS_SYSNO_INDEX(sysno), &val2);
+         break;
+      default:
+         vg_assert(0);
+         break;
+   }
+
+   return VG_(mk_SysRes_amd64_solaris)(err ? True : False, val,
+                                       err ? 0 : val2);
+
 #else
 #  error Unknown platform
 #endif
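Aside: the wrappers above follow the Solaris convention that a syscall returns
its result in two words (edx:eax or rdx:rax) with the carry flag telling
success from an error code.  A standalone sketch of how a caller splits the
packed 64-bit x86 result and classifies it, using simplified stand-in types;
this only illustrates the convention, not Valgrind's SysRes machinery:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int       UInt;
typedef unsigned long long ULong;

/* Simplified stand-in for the two-word syscall result. */
typedef struct { bool isError; UInt val; UInt val2; } Result;

/* Split the packed %edx:%eax pair returned by the x86 wrapper and apply the
   carry-flag convention: on error only the error code (in %eax) matters. */
static Result classify(ULong edx_eax, bool carry_set)
{
   Result r;
   r.isError = carry_set;
   r.val     = (UInt)edx_eax;                          /* %eax */
   r.val2    = carry_set ? 0 : (UInt)(edx_eax >> 32);  /* %edx */
   return r;
}

int main(void)
{
   Result ok  = classify(((ULong)1u << 32) | 42u, false);
   Result err = classify(4 /* EINTR */, true);
   assert(!ok.isError && ok.val == 42 && ok.val2 == 1);
   assert(err.isError && err.val == 4 && err.val2 == 0);
   printf("ok: val=%u val2=%u  err: code=%u\n", ok.val, ok.val2, err.val);
   return 0;
}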
diff --git a/coregrind/m_syswrap/priv_syswrap-generic.h b/coregrind/m_syswrap/priv_syswrap-generic.h
index f014914..6dbcf12 100644
--- a/coregrind/m_syswrap/priv_syswrap-generic.h
+++ b/coregrind/m_syswrap/priv_syswrap-generic.h
@@ -51,7 +51,7 @@
 /* Handy small function to help stop wrappers from segfaulting when
    presented with bogus client addresses.  Is not used for generating
    user-visible errors. */
-extern Bool ML_(safe_to_deref) ( void* start, SizeT size );
+extern Bool ML_(safe_to_deref) ( const void *start, SizeT size );
 
 // Returns True if the signal is OK for the client to use.
 extern Bool ML_(client_signal_OK)(Int sigNo);
@@ -61,11 +61,18 @@
 Bool ML_(fd_allowed)(Int fd, const HChar *syscallname, ThreadId tid,
                      Bool isNewFD);
 
+extern void ML_(record_fd_close)               (Int fd);
 extern void ML_(record_fd_open_named)          (ThreadId tid, Int fd);
 extern void ML_(record_fd_open_nameless)       (ThreadId tid, Int fd);
 extern void ML_(record_fd_open_with_given_name)(ThreadId tid, Int fd,
                                                 const HChar *pathname);
 
+// Returns True if the given file descriptor is already recorded.
+extern Bool ML_(fd_recorded)(Int fd);
+// Returns a pathname representing a recorded fd.
+// The returned string must not be modified or freed.
+extern const HChar *ML_(find_fd_recorded_by_fd)(Int fd);
+
 // Used when killing threads -- we must not kill a thread if it's the thread
 // that would do Valgrind's final cleanup and output.
 extern
@@ -94,6 +101,12 @@
 extern 
 void ML_(POST_unknown_ioctl)(ThreadId tid, UInt res, UWord request, UWord arg);
 
+extern
+void ML_(pre_argv_envp)(Addr a, ThreadId tid, const HChar *s1, const HChar *s2);
+
+extern Bool
+ML_(handle_auxv_open)(SyscallStatus *status, const HChar *filename,
+                      int flags);
 
 DECL_TEMPLATE(generic, sys_ni_syscall);            // * P -- unimplemented
 DECL_TEMPLATE(generic, sys_exit);
diff --git a/coregrind/m_syswrap/priv_syswrap-solaris.h b/coregrind/m_syswrap/priv_syswrap-solaris.h
new file mode 100644
index 0000000..1ea0aa8
--- /dev/null
+++ b/coregrind/m_syswrap/priv_syswrap-solaris.h
@@ -0,0 +1,92 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Solaris-specific syscalls stuff.      priv_syswrap-solaris.h ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2011-2014 Petr Pavlu
+      setup@dagobah.cz
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#ifndef __PRIV_SYSWRAP_SOLARIS_H
+#define __PRIV_SYSWRAP_SOLARIS_H
+
+#include "pub_core_basics.h"        // VG_ macro
+#include "priv_types_n_macros.h"    // DECL_TEMPLATE
+#include "pub_core_tooliface.h"     // CorePart
+
+/* Macro to join a syscall name with a syscall variant. */
+#define SC2(name, subname) \
+   name "_" subname
+
+/* Macro to join a syscall name with its variant and sub-variant. */
+#define SC3(name, subname, subsubname) \
+   name "_" subname "_" subsubname
+
+extern void ML_(call_on_new_stack_0_1)(Addr stack, Addr retaddr,
+                                       void (*f)(Word), Word arg1);
+extern Word ML_(start_thread_NORETURN)(void *arg);
+extern Addr ML_(allocstack)           (ThreadId tid);
+extern void ML_(setup_start_thread_context)(ThreadId tid, vki_ucontext_t *uc);
+
+extern UInt ML_(fletcher32)(UShort *buf, SizeT blocks);
+extern ULong ML_(fletcher64)(UInt *buf, SizeT blocks);
+extern void ML_(save_machine_context)(ThreadId tid, vki_ucontext_t *uc,
+                                      CorePart part);
+extern void ML_(restore_machine_context)(ThreadId tid, vki_ucontext_t *uc,
+                                         CorePart part, Bool esp_is_thrptr);
+
+#if defined(VGP_x86_solaris)
+
+extern void ML_(setup_gdt)(VexGuestX86State *vex);
+extern void ML_(cleanup_gdt)(VexGuestX86State *vex);
+extern void ML_(update_gdt_lwpgs)(ThreadId tid);
+
+/* prototypes */
+DECL_TEMPLATE(x86_solaris, sys_fstatat64);
+DECL_TEMPLATE(x86_solaris, sys_openat64);
+DECL_TEMPLATE(x86_solaris, sys_llseek32);
+DECL_TEMPLATE(x86_solaris, sys_mmap64);
+DECL_TEMPLATE(x86_solaris, sys_stat64);
+DECL_TEMPLATE(x86_solaris, sys_lstat64);
+DECL_TEMPLATE(x86_solaris, sys_fstat64);
+DECL_TEMPLATE(x86_solaris, sys_statvfs64);
+DECL_TEMPLATE(x86_solaris, sys_fstatvfs64);
+DECL_TEMPLATE(x86_solaris, sys_setrlimit64);
+DECL_TEMPLATE(x86_solaris, sys_getrlimit64);
+DECL_TEMPLATE(x86_solaris, sys_pread64);
+DECL_TEMPLATE(x86_solaris, sys_pwrite64);
+DECL_TEMPLATE(x86_solaris, sys_open64);
+
+#elif defined(VGP_amd64_solaris)
+/* Nothing yet. */
+
+#else
+#  error "Unknown platform"
+#endif
+
+#endif   // __PRIV_SYSWRAP_SOLARIS_H
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/
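Aside: the SC2/SC3 macros above simply paste syscall name components together
with underscores, which keeps the wrapper names for multiplexed Solaris
syscalls consistent.  A tiny illustration of the expansion; the component
strings used here are arbitrary examples, not entries from the syscall tables:

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define SC2(name, subname)             name "_" subname
#define SC3(name, subname, subsubname) name "_" subname "_" subsubname

int main(void)
{
   /* Adjacent string literals are concatenated at compile time. */
   const char *two   = SC2("example", "variant");
   const char *three = SC3("example", "variant", "subvariant");
   assert(strcmp(two, "example_variant") == 0);
   assert(strcmp(three, "example_variant_subvariant") == 0);
   printf("%s\n%s\n", two, three);
   return 0;
}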
diff --git a/coregrind/m_syswrap/priv_types_n_macros.h b/coregrind/m_syswrap/priv_types_n_macros.h
index 26a82f7..16576d5 100644
--- a/coregrind/m_syswrap/priv_types_n_macros.h
+++ b/coregrind/m_syswrap/priv_types_n_macros.h
@@ -112,7 +112,7 @@
       Int s_arg6;
       Int uu_arg7;
       Int uu_arg8;
-#     elif defined(VGP_x86_darwin)
+#     elif defined(VGP_x86_darwin) || defined(VGP_x86_solaris)
       Int s_arg1;
       Int s_arg2;
       Int s_arg3;
@@ -121,7 +121,7 @@
       Int s_arg6;
       Int s_arg7;
       Int s_arg8;
-#     elif defined(VGP_amd64_darwin)
+#     elif defined(VGP_amd64_darwin) || defined(VGP_amd64_solaris)
       Int o_arg1;
       Int o_arg2;
       Int o_arg3;
@@ -203,6 +203,10 @@
 extern const SyscallTableEntry ML_(syscall_table)[];
 extern const UInt ML_(syscall_table_size);
 
+#elif defined(VGO_solaris)
+extern
+SyscallTableEntry* ML_(get_solaris_syscall_entry)( UInt sysno );
+
 #else
 #  error Unknown OS
 #endif   
@@ -281,7 +285,7 @@
     vgSysWrap_##auxstr##_##name##_after
 
 /* Add a generic wrapper to a syscall table. */
-#if defined(VGO_linux)
+#if defined(VGO_linux) || defined(VGO_solaris)
 #  define GENX_(sysno, name)  WRAPPER_ENTRY_X_(generic, sysno, name)
 #  define GENXY(sysno, name)  WRAPPER_ENTRY_XY(generic, sysno, name)
 #elif defined(VGO_darwin)
@@ -331,7 +335,7 @@
    return sr_Res(st->sres);
 }
 
-#if defined(VGO_darwin)
+#if defined(VGO_darwin) || defined(VGO_solaris)
 static inline UWord getRESHI ( SyscallStatus* st ) {
    vg_assert(st->what == SsComplete);
    vg_assert(!sr_isError(st->sres));
@@ -403,7 +407,7 @@
 #  define PRA5(s,t,a) PRRAn(5,s,t,a)
 #  define PRA6(s,t,a) PRRAn(6,s,t,a)
 
-#elif defined(VGP_x86_darwin)
+#elif defined(VGP_x86_darwin) || defined(VGP_x86_solaris)
    /* Up to 8 parameters, all on the stack. */
 #  define PRA1(s,t,a) PSRAn(1,s,t,a)
 #  define PRA2(s,t,a) PSRAn(2,s,t,a)
@@ -414,7 +418,7 @@
 #  define PRA7(s,t,a) PSRAn(7,s,t,a)
 #  define PRA8(s,t,a) PSRAn(8,s,t,a)
 
-#elif defined(VGP_amd64_darwin)
+#elif defined(VGP_amd64_darwin) || defined(VGP_amd64_solaris)
    /* Up to 8 parameters, 6 in registers, 2 on the stack. */
 #  define PRA1(s,t,a) PRRAn(1,s,t,a)
 #  define PRA2(s,t,a) PRRAn(2,s,t,a)
diff --git a/coregrind/m_syswrap/syscall-amd64-solaris.S b/coregrind/m_syswrap/syscall-amd64-solaris.S
new file mode 100644
index 0000000..f205223
--- /dev/null
+++ b/coregrind/m_syswrap/syscall-amd64-solaris.S
@@ -0,0 +1,278 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Support for doing system calls.      syscall-amd64-solaris.S ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+  This file is part of Valgrind, a dynamic binary instrumentation
+  framework.
+
+  Copyright (C) 2014-2014 Petr Pavlu
+     setup@dagobah.cz
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License as
+  published by the Free Software Foundation; either version 2 of the
+  License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+  02111-1307, USA.
+
+  The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_amd64_solaris)
+
+#include "pub_core_basics_asm.h"
+#include "pub_core_vkiscnums_asm.h"
+#include "libvex_guest_offsets.h"
+
+/* From vki-solaris.h, checked at startup by m_vki.c. */
+#define VKI_SIG_SETMASK 3
+
+/* Prototype:
+   Int ML_(do_syscall_for_client_WRK)(
+      Int syscallno,			// %rdi = %rbp-48
+      void *guest_state,		// %rsi = %rbp-40
+      const vki_sigset_t *sysmask,	// %rdx = %rbp-32
+      const vki_sigset_t *postmask,	// %rcx = %rbp-24
+      UChar *cflag)			// %r8 = %rbp-16
+*/
+
+.macro ESTABLISH_STACKFRAME
+	/* Establish stack frame. */
+	pushq	%rbp
+	movq	%rsp, %rbp
+	pushq	%rbx				/* save %rbx */
+
+	/* We'll use %rbx instead of %rbp to address the stack frame after the
+	   door syscall is finished because %rbp is cleared by the syscall. */
+	movq	%rsp, %rbx			/* %rbx = %rbp - 8 */
+
+	/* Push the parameters on the stack. */
+	pushq	%r8				/* store %r8 at %rbp-16 */
+	pushq	%rcx				/* store %rcx at %rbp-24 */
+	pushq	%rdx				/* store %rdx at %rbp-32 */
+	pushq	%rsi				/* store %rsi at %rbp-40 */
+	pushq	%rdi				/* store %rdi at %rbp-48 */
+.endm
+
+.macro UNBLOCK_SIGNALS
+	/* Set the signal mask which should be current during the syscall. */
+	/* Set up for sigprocmask(SIG_SETMASK, sysmask, postmask). */
+	movq	-24(%rbp), %rdx
+	movq	-32(%rbp), %rsi
+	movq	$VKI_SIG_SETMASK, %rdi
+	movq	$__NR_sigprocmask, %rax
+	syscall
+	jc	sigprocmask_failed		/* sigprocmask failed */
+.endm
+
+.macro REBLOCK_SIGNALS
+	/* Set up for sigprocmask(SIG_SETMASK, postmask, NULL). */
+	movq	$0, %rdx
+	movq	-24(%rbp), %rsi
+	movq	$VKI_SIG_SETMASK, %rdi
+	movq	$__NR_sigprocmask, %rax
+	syscall
+	/* The syscall above changes the carry flag.  This means that if the
+	   syscall fails and we receive an interrupt after it then we've got
+	   an invalid carry flag value in the fixup code.  We don't care about
+	   it because this syscall should never fail and if it does then we're
+	   going to stop Valgrind anyway. */
+	jc	sigprocmask_failed		/* sigprocmask failed */
+.endm
+
+.macro SIMPLE_RETURN
+	xorq	%rax, %rax			/* SUCCESS */
+	movq	-8(%rbp), %rbx			/* restore %rbx */
+	movq	%rbp, %rsp
+	popq	%rbp
+	ret
+.endm
+
+sigprocmask_failed:
+	/* Failure: return 0x8000 | error code. */
+	andq	$0x7FFF, %rax
+	orq	$0x8000, %rax
+	movq	-8(%rbp), %rbx			/* restore %rbx */
+	movq	%rbp, %rsp
+	popq	%rbp
+	ret
+
+.globl ML_(do_syscall_for_client_WRK)
+ML_(do_syscall_for_client_WRK):
+	ESTABLISH_STACKFRAME
+
+1:	/* Even though we can't take a signal until the sigprocmask completes,
+	   start the range early.  If %rip is in the range [1, 2), the syscall
+	   hasn't been started yet. */
+	UNBLOCK_SIGNALS
+
+	/* Copy syscall parameters. */
+	/* do_syscall8 */
+	/* 6 register parameters. */
+	movq	-40(%rbp), %rax
+	movq	OFFSET_amd64_RDI(%rax), %rdi
+	movq	OFFSET_amd64_RSI(%rax), %rsi
+	movq	OFFSET_amd64_RDX(%rax), %rdx
+	movq	OFFSET_amd64_R10(%rax), %r10
+	movq	OFFSET_amd64_R8(%rax), %r8
+	movq	OFFSET_amd64_R9(%rax), %r9
+	/* 2 stack parameters. */
+	movq	OFFSET_amd64_RSP(%rax), %rax
+	movq	16(%rax), %r11
+	pushq	%r11
+	movq	8(%rax), %r11
+	pushq	%r11
+	/* Return address. */
+	movq	0(%rax), %r11
+	pushq	%r11
+
+	/* Put syscall number in %rax. */
+	movq	-48(%rbp), %rax
+
+	/* Do the syscall.  Note that the Solaris kernel doesn't directly
+	   restart syscalls! */
+	syscall
+
+2:	/* In the range [2, 3), the syscall result is in %rax and %rdx and C,
+	   but hasn't been committed to the thread state.  If we get
+	   interrupted in this section then we'll just use values saved in the
+	   ucontext structure.
+
+	   Important note for this and the following section: don't add any
+	   code here that alters the carry flag or, worse, calls any function.
+	   That would completely break the fixup after an interrupt. */
+	movq	-40(%rbp), %rcx
+	movq	%rax, OFFSET_amd64_RAX(%rcx)	/* save %rax to VEX */
+	movq	%rdx, OFFSET_amd64_RDX(%rcx)	/* save %rdx to VEX */
+	movq	-16(%rbp), %rcx
+	setc	0(%rcx)				/* save returned carry flag */
+
+3:	/* Re-block signals.  If %rip is in [3, 4), then the syscall is
+	   complete and we do not need to worry about it.  We only have to
+	   save the carry flag correctly.  If we get interrupted in this
+	   section then we just have to propagate the carry flag from the
+	   ucontext structure to the thread state; the %rax and %rdx values
+	   are already saved. */
+	REBLOCK_SIGNALS
+
+4:	/* Now safe from signals. */
+	SIMPLE_RETURN
+
+.section .rodata
+/* Export the ranges so that
+   VG_(fixup_guest_state_after_syscall_interrupted) can do the right thing. */
+
+.globl ML_(blksys_setup)
+.globl ML_(blksys_complete)
+.globl ML_(blksys_committed)
+.globl ML_(blksys_finished)
+ML_(blksys_setup):	.quad 1b
+ML_(blksys_complete):	.quad 2b
+ML_(blksys_committed):	.quad 3b
+ML_(blksys_finished):	.quad 4b
+.previous
+
+/* Prototype:
+   Int ML_(do_syscall_for_client_dret_WRK)(
+      Int syscallno,			// %rdi = %rbp-48 = %rbx-48+8
+      void *guest_state,		// %rsi = %rbp-40 = %rbx-40+8
+      const vki_sigset_t *sysmask,	// %rdx = %rbp-32 = %rbx-32+8
+      const vki_sigset_t *postmask,	// %rcx = %rbp-24 = %rbx-24+8
+      UChar *cflag)			// %r8 = %rbp-16 = %rbx-16+8
+*/
+
+/* Door_return is a very special call because the kernel stores the returned
+   data directly on the stack and modifies the stack pointer accordingly.
+   Therefore we switch to the client stack before doing the syscall; this is
+   relatively trivial, but extra care has to be taken when we get interrupted
+   at some point. */
+
+.globl ML_(do_syscall_for_client_dret_WRK)
+ML_(do_syscall_for_client_dret_WRK):
+	ESTABLISH_STACKFRAME
+
+1:	/* Even though we can't take a signal until the sigprocmask completes,
+	   start the range early.  If %rip is in the range [1, 2), the syscall
+	   hasn't been started yet. */
+	UNBLOCK_SIGNALS
+
+	/* Prepare 6 register parameters. */
+	movq	-40(%rbp), %rax
+	movq	OFFSET_amd64_RDI(%rax), %rdi
+	movq	OFFSET_amd64_RSI(%rax), %rsi
+	movq	OFFSET_amd64_RDX(%rax), %rdx
+	movq	OFFSET_amd64_R10(%rax), %r10
+	movq	OFFSET_amd64_R8(%rax), %r8
+	movq	OFFSET_amd64_R9(%rax), %r9
+
+	/* Switch to the client stack. */
+	movq	OFFSET_amd64_RSP(%rax), %rsp	/* %rsp = simulated RSP */
+	/* Change %rbp to a client value. It will always get committed by
+	   the fixup code for range [2, 3) so it needs to be set to what the
+	   client expects. */
+	movq	OFFSET_amd64_RBP(%rax), %rbp	/* %rbp = simulated RBP */
+
+	/* Put syscall number in %rax. */
+	movq	-48+8(%rbx), %rax
+
+	/* Do the syscall.  Note that the Solaris kernel doesn't directly
+	   restart syscalls! */
+	syscall
+
+2:	/* In the range [2, 3), the syscall result is in %rax, %rdx, %rsp and
+	   %rbp and C, but hasn't been committed to the thread state.  If we
+	   get interrupted in this section then we'll just use values saved in
+	   the ucontext structure.
+
+	   Important note for this and the following section: don't add any
+	   code here that alters the carry flag or, worse, calls any function.
+	   That would completely break the fixup after an interrupt. */
+	movq	-40+8(%rbx), %rcx
+	movq	%rax, OFFSET_amd64_RAX(%rcx)	/* save %rax to VEX */
+	movq	%rdx, OFFSET_amd64_RDX(%rcx)	/* save %rdx to VEX */
+	movq	%rsp, OFFSET_amd64_RSP(%rcx)	/* save %rsp to VEX */
+	movq	%rbp, OFFSET_amd64_RBP(%rcx)	/* save %rbp to VEX */
+	movq	-16+8(%rbx), %rcx
+	setc	0(%rcx)				/* save returned carry flag */
+
+	movq	%rbx, %rsp			/* switch to V stack */
+
+3:	/* Re-block signals.  If %rip is in [3, 4), then the syscall is
+	   complete and we do not need to worry about it.  We only have to
+	   save the carry flag correctly.  If we get interrupted in this
+	   section then we just have to propagate the carry flag from the
+	   ucontext structure to the thread state; the %rax, %rdx, %rsp and
+	   %rbp values are already saved. */
+	movq	%rbx, %rbp
+	addq	$8, %rbp
+	REBLOCK_SIGNALS
+
+4:	/* Now safe from signals. */
+	SIMPLE_RETURN
+
+.section .rodata
+.globl ML_(blksys_setup_DRET)
+.globl ML_(blksys_complete_DRET)
+.globl ML_(blksys_committed_DRET)
+.globl ML_(blksys_finished_DRET)
+ML_(blksys_setup_DRET):		.quad 1b
+ML_(blksys_complete_DRET):	.quad 2b
+ML_(blksys_committed_DRET):	.quad 3b
+ML_(blksys_finished_DRET):	.quad 4b
+.previous
+
+#endif // defined(VGP_amd64_solaris)
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/
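Aside: the blksys_* symbols exported from the .rodata sections above give
VG_(fixup_guest_state_after_syscall_interrupted) the boundaries of the phases
of the syscall sequence, so that an interrupted syscall can be resumed or its
result committed correctly.  A minimal sketch of the kind of range test those
labels enable; the fixup function itself is not part of this section, so the
addresses and phase names below are invented purely for illustration:

#include <stdio.h>

typedef unsigned long Addr;

typedef enum {
   BeforeSyscall,      /* [setup, complete):     syscall not started yet    */
   ResultInRegsOnly,   /* [complete, committed): result only in registers   */
   ResultCommitted,    /* [committed, finished): result saved to the state  */
   Finished            /* at or past finished:   nothing left to fix up     */
} Phase;

static Phase classify(Addr ip, Addr setup, Addr complete,
                      Addr committed, Addr finished)
{
   if (ip >= setup && ip < complete)      return BeforeSyscall;
   if (ip >= complete && ip < committed)  return ResultInRegsOnly;
   if (ip >= committed && ip < finished)  return ResultCommitted;
   return Finished;
}

int main(void)
{
   /* Invented label addresses standing in for blksys_setup .. finished. */
   Addr setup = 0x1000, complete = 0x1040, committed = 0x1060, finished = 0x1070;
   printf("phase at 0x1050: %d\n",
          classify(0x1050, setup, complete, committed, finished));
   return 0;
}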
diff --git a/coregrind/m_syswrap/syscall-x86-solaris.S b/coregrind/m_syswrap/syscall-x86-solaris.S
new file mode 100644
index 0000000..cb4346f
--- /dev/null
+++ b/coregrind/m_syswrap/syscall-x86-solaris.S
@@ -0,0 +1,275 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Support for doing system calls.        syscall-x86-solaris.S ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+  This file is part of Valgrind, a dynamic binary instrumentation
+  framework.
+
+  Copyright (C) 2011-2014 Petr Pavlu
+     setup@dagobah.cz
+
+  This program is free software; you can redistribute it and/or
+  modify it under the terms of the GNU General Public License as
+  published by the Free Software Foundation; either version 2 of the
+  License, or (at your option) any later version.
+
+  This program is distributed in the hope that it will be useful, but
+  WITHOUT ANY WARRANTY; without even the implied warranty of
+  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+  General Public License for more details.
+
+  You should have received a copy of the GNU General Public License
+  along with this program; if not, write to the Free Software
+  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+  02111-1307, USA.
+
+  The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_x86_solaris)
+
+#include "pub_core_basics_asm.h"
+#include "pub_core_vkiscnums_asm.h"
+#include "libvex_guest_offsets.h"
+
+/* From vki-solaris.h, checked at startup by m_vki.c. */
+#define VKI_SIG_SETMASK 3
+
+/* Prototype:
+   Int ML_(do_syscall_for_client_WRK)(
+      Int syscallno,			// %ebp+8
+      void *guest_state,		// %ebp+12
+      const vki_sigset_t *sysmask,	// %ebp+16
+      const vki_sigset_t *postmask,	// %ebp+20
+      UChar *cflag)			// %ebp+24
+*/
+
+.macro ESTABLISH_STACKFRAME
+	/* Establish stack frame. */
+	pushl	%ebp
+	movl	%esp, %ebp
+	pushl	%ebx				/* save %ebx */
+
+	/* We'll use %ebx instead of %ebp to address the stack frame after the
+	   door syscall is finished because %ebp is cleared by the syscall. */
+	movl	%esp, %ebx			/* %ebx = %ebp - 4 */
+.endm
+
+.macro UNBLOCK_SIGNALS
+	/* Set the signal mask which should be current during the syscall. */
+	/* Set up for sigprocmask(SIG_SETMASK, sysmask, postmask). */
+	pushl	20(%ebp)
+	pushl	16(%ebp)
+	pushl	$VKI_SIG_SETMASK
+	pushl	$0xcafebabe			/* totally fake return address */
+	movl	$__NR_sigprocmask, %eax
+	int	$0x91
+	jc	sigprocmask_failed		/* sigprocmask failed */
+	addl	$16, %esp
+.endm
+
+.macro REBLOCK_SIGNALS
+	/* Set up for sigprocmask(SIG_SETMASK, postmask, NULL). */
+	pushl	$0
+	pushl	20(%ebp)
+	pushl	$VKI_SIG_SETMASK
+	pushl	$0xcafef00d			/* totally fake return address */
+	movl	$__NR_sigprocmask, %eax
+	int	$0x91
+	/* The syscall above changes the carry flag.  This means that if the
+	   syscall fails and we receive an interrupt after it then we've got
+	   an invalid carry flag value in the fixup code.  We don't care about
+	   it because this syscall should never fail and if it does then we're
+	   going to stop Valgrind anyway. */
+	jc	sigprocmask_failed		/* sigprocmask failed */
+	addl	$16, %esp
+.endm
+
+.macro SIMPLE_RETURN
+	xorl	%eax, %eax			/* SUCCESS */
+	movl	-4(%ebp), %ebx			/* restore %ebx */
+	movl	%ebp, %esp
+	popl	%ebp
+	ret
+.endm
+
+sigprocmask_failed:
+	/* Failure: return 0x8000 | error code. */
+	/* Note that we enter here with %esp being 16 bytes too low (4 extra
+	   words on the stack).  But because we're nuking the stack frame now,
+	   that doesn't matter. */
+	andl	$0x7FFF, %eax
+	orl	$0x8000, %eax
+	movl	-4(%ebp), %ebx			/* restore %ebx */
+	movl	%ebp, %esp
+	popl	%ebp
+	ret
+
+.globl ML_(do_syscall_for_client_WRK)
+ML_(do_syscall_for_client_WRK):
+	ESTABLISH_STACKFRAME
+
+1:	/* Even though we can't take a signal until the sigprocmask completes,
+	   start the range early.  If %eip is in the range [1, 2), the syscall
+	   hasn't been started yet. */
+	UNBLOCK_SIGNALS
+
+	/* Copy syscall parameters to the stack - assume no more than 8 plus
+	   the return address. */
+	/* do_syscall8 */
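+	/* The arguments are copied in reverse order (arg8 first, arg1 last)
+	   and the client's return address is pushed last, so the copied frame
+	   mirrors the client frame: return address at 0(%esp), arg1 at
+	   4(%esp), and so on. */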
+	movl	12(%ebp), %edx
+	movl	OFFSET_x86_ESP(%edx), %edx	/* %edx = simulated ESP */
+	movl	28+4(%edx), %eax
+	pushl	%eax
+	movl	24+4(%edx), %eax
+	pushl	%eax
+	movl	20+4(%edx), %eax
+	pushl	%eax
+	movl	16+4(%edx), %eax
+	pushl	%eax
+	movl	12+4(%edx), %eax
+	pushl	%eax
+	movl	8+4(%edx), %eax
+	pushl	%eax
+	movl	4+4(%edx), %eax
+	pushl	%eax
+	movl	0+4(%edx), %eax
+	pushl	%eax
+	/* Return address. */
+	movl	0(%edx), %eax
+	pushl	%eax
+
+	/* Put syscall number in %eax. */
+	movl	8(%ebp), %eax
+
+	/* Do the syscall.  Note that the Solaris kernel doesn't directly
+	   restart syscalls! */
+	int	$0x91
+
+2:	/* In the range [2, 3), the syscall result is in %eax and %edx and C,
+	   but hasn't been committed to the thread state.  If we get
+	   interrupted in this section then we'll just use values saved in the
+	   ucontext structure.
+
+	   Important note for this and the following section: Don't add here
+	   any code that alters the carry flag or worse, call any function.
+	   That would completely break the fixup after an interrupt. */
+	movl	12(%ebp), %ecx
+	movl	%eax, OFFSET_x86_EAX(%ecx)	/* save %eax to VEX */
+	movl	%edx, OFFSET_x86_EDX(%ecx)	/* save %edx to VEX */
+	movl	24(%ebp), %ecx
+	setc	0(%ecx)				/* save returned carry flag */
+
+3:	/* Re-block signals.  If %eip is in [3, 4), then the syscall is
+	   complete and we do not need to worry about it; we only have to
+	   correctly save the carry flag.  If we get interrupted in this
+	   section then we just have to propagate the carry flag from the
+	   ucontext structure to the thread state; the %eax and %edx values
+	   are already saved. */
+	REBLOCK_SIGNALS
+
+4:	/* Now safe from signals. */
+	SIMPLE_RETURN
+
+.section .rodata
+/* Export the ranges so that
+   VG_(fixup_guest_state_after_syscall_interrupted) can do the right thing. */
+
+.globl ML_(blksys_setup)
+.globl ML_(blksys_complete)
+.globl ML_(blksys_committed)
+.globl ML_(blksys_finished)
+ML_(blksys_setup):	.long 1b
+ML_(blksys_complete):	.long 2b
+ML_(blksys_committed):	.long 3b
+ML_(blksys_finished):	.long 4b
+.previous
+
+/* Prototype:
+   Int ML_(do_syscall_for_client_dret_WRK)(
+      Int syscallno,			// %ebp+8 = %ebx+8+4
+      void *guest_state,		// %ebp+12 = %ebx+12+4
+      const vki_sigset_t *sysmask,	// %ebp+16 = %ebx+16+4
+      const vki_sigset_t *postmask,	// %ebp+20 = %ebx+20+4
+      UChar *cflag)			// %ebp+24 = %ebx+24+4
+*/
+
+/* Door_return is a very special call because the data are stored by the
+   kernel directly on the stack and the stack pointer is appropriately
+   modified by the kernel.  Therefore we switch to the client stack before
+   doing the syscall.  This is relatively trivial, but extra care has to be
+   taken when we get interrupted at some point. */
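+/* The fixup ranges of this variant are exported below with a _DRET suffix
+   (ML_(blksys_setup_DRET) etc.) so that
+   VG_(fixup_guest_state_after_syscall_interrupted) can handle the
+   door_return case separately. */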
+
+.globl ML_(do_syscall_for_client_dret_WRK)
+ML_(do_syscall_for_client_dret_WRK):
+	ESTABLISH_STACKFRAME
+
+1:	/* Even though we can't take a signal until the sigprocmask completes,
+	   start the range early.  If %eip is in the range [1, 2), the syscall
+	   hasn't been started yet. */
+	UNBLOCK_SIGNALS
+
+	/* Switch to the client stack. */
+	movl	12(%ebp), %edx
+	movl	OFFSET_x86_ESP(%edx), %esp	/* %esp = simulated ESP */
+	/* Change %ebp to a client value. It will always get committed by
+	   the fixup code for range [2, 3) so it needs to be set to what the
+	   client expects. */
+	movl	OFFSET_x86_EBP(%edx), %ebp	/* %ebp = simulated EBP */
+
+	/* Put syscall number in %eax. */
+	movl	8+4(%ebx), %eax
+
+	/* Do the syscall.  Note that the Solaris kernel doesn't directly
+	   restart syscalls! */
+	int	$0x91
+
+2:	/* In the range [2, 3), the syscall result is in %eax, %edx, %esp and
+	   %ebp and C, but hasn't been committed to the thread state.  If we
+	   get interrupted in this section then we'll just use values saved in
+	   the ucontext structure.
+
+	   Important note for this and the following section: Don't add here
+	   any code that alters the carry flag or worse, call any function.
+	   That would completely break the fixup after an interrupt. */
+	movl	12+4(%ebx), %ecx
+	movl	%eax, OFFSET_x86_EAX(%ecx)	/* save %eax to VEX */
+	movl	%edx, OFFSET_x86_EDX(%ecx)	/* save %edx to VEX */
+	movl	%esp, OFFSET_x86_ESP(%ecx)	/* save %esp to VEX */
+	movl	%ebp, OFFSET_x86_EBP(%ecx)	/* save %ebp to VEX */
+	movl	24+4(%ebx), %ecx
+	setc	0(%ecx)				/* save returned carry flag */
+
+	movl	%ebx, %esp			/* switch to V stack */
+
+3:	/* Re-block signals.  If %eip is in [3, 4), then the syscall is
+	   complete and we do not need to worry about it; we only have to
+	   correctly save the carry flag.  If we get interrupted in this
+	   section then we just have to propagate the carry flag from the
+	   ucontext structure to the thread state; the %eax, %edx, %esp and
+	   %ebp values are already saved. */
+	movl	%ebx, %ebp
+	addl	$4, %ebp
+	REBLOCK_SIGNALS
+
+4:	/* Now safe from signals. */
+	SIMPLE_RETURN
+
+.section .rodata
+.globl ML_(blksys_setup_DRET)
+.globl ML_(blksys_complete_DRET)
+.globl ML_(blksys_committed_DRET)
+.globl ML_(blksys_finished_DRET)
+ML_(blksys_setup_DRET):		.long 1b
+ML_(blksys_complete_DRET):	.long 2b
+ML_(blksys_committed_DRET):	.long 3b
+ML_(blksys_finished_DRET):	.long 4b
+.previous
+
+#endif // defined(VGP_x86_solaris)
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_syswrap/syswrap-amd64-solaris.c b/coregrind/m_syswrap/syswrap-amd64-solaris.c
new file mode 100644
index 0000000..2bc2cae
--- /dev/null
+++ b/coregrind/m_syswrap/syswrap-amd64-solaris.c
@@ -0,0 +1,564 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Platform-specific syscalls stuff.    syswrap-amd64-solaris.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2014-2014 Petr Pavlu
+      setup@dagobah.cz
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_amd64_solaris)
+
+#include "libvex_guest_offsets.h"
+#include "pub_core_basics.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_vki.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcsignal.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_syswrap.h"
+
+#include "priv_types_n_macros.h"
+#include "priv_syswrap-generic.h"
+#include "priv_syswrap-solaris.h"
+
+
+/* Call f(arg1), but first switch stacks, using 'stack' as the new stack, and
+   use 'retaddr' as f's return-to address.  Also, clear all the integer
+   registers before entering f. */
+__attribute__((noreturn))
+void ML_(call_on_new_stack_0_1)(Addr stack,             /* %rdi */
+                                Addr retaddr,           /* %rsi */
+                                void (*f)(Word),        /* %rdx */
+                                Word arg1);             /* %rcx */
+__asm__ (
+".text\n"
+".globl vgModuleLocal_call_on_new_stack_0_1\n"
+"vgModuleLocal_call_on_new_stack_0_1:\n"
+"   movq  %rdi, %rsp\n"         /* set stack */
+"   movq  %rcx, %rdi\n"         /* set arg1 */
+"   pushq %rsi\n"               /* retaddr to stack */
+"   pushq %rdx\n"               /* f to stack */
+"   movq  $0, %rax\n"           /* zero all GP regs (except %rdi) */
+"   movq  $0, %rbx\n"
+"   movq  $0, %rcx\n"
+"   movq  $0, %rdx\n"
+"   movq  $0, %rsi\n"
+"   movq  $0, %rbp\n"
+"   movq  $0, %r8\n"
+"   movq  $0, %r9\n"
+"   movq  $0, %r10\n"
+"   movq  $0, %r11\n"
+"   movq  $0, %r12\n"
+"   movq  $0, %r13\n"
+"   movq  $0, %r14\n"
+"   movq  $0, %r15\n"
+"   ret\n"                      /* jump to f */
+"   ud2\n"                      /* should never get here */
+".previous\n"
+);
+
+/* This function is called to set up the context of a new Valgrind thread
+   (which will run the client code). */
+void ML_(setup_start_thread_context)(ThreadId tid, vki_ucontext_t *uc)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   UWord *stack = (UWord*)tst->os_state.valgrind_stack_init_SP;
+
+   VG_(memset)(uc, 0, sizeof(*uc));
+   uc->uc_flags = VKI_UC_CPU | VKI_UC_SIGMASK;
+
+   /* Start the thread with everything blocked. */
+   VG_(sigfillset)(&uc->uc_sigmask);
+
+   /* Set up the stack; it should always be 16-byte aligned before doing
+      a function call, i.e. the first parameter is also 16-byte aligned. */
+   vg_assert(VG_IS_16_ALIGNED(stack));
+   stack -= 1;
+   stack[0] = 0; /* bogus return value */
+
+   /* Set up the registers. */
+   uc->uc_mcontext.gregs[VKI_REG_RDI] = (UWord)tst; /* the parameter */
+   uc->uc_mcontext.gregs[VKI_REG_RIP] = (UWord)ML_(start_thread_NORETURN);
+   uc->uc_mcontext.gregs[VKI_REG_RSP] = (UWord)stack;
+}
+
+/* Architecture-specific part of VG_(save_context). */
+void ML_(save_machine_context)(ThreadId tid, vki_ucontext_t *uc,
+                               CorePart part)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   struct vki_fpchip_state *fs
+      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;
+   SizeT i;
+
+   /* CPU */
+   /* Common registers */
+   uc->uc_mcontext.gregs[VKI_REG_RIP] = tst->arch.vex.guest_RIP;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RIP,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RIP], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_RAX] = tst->arch.vex.guest_RAX;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RAX,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RAX], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_RBX] = tst->arch.vex.guest_RBX;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RBX,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBX], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_RCX] = tst->arch.vex.guest_RCX;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RCX,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RCX], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_RDX] = tst->arch.vex.guest_RDX;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RDX,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDX], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_RBP] = tst->arch.vex.guest_RBP;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RBP,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBP], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_RSI] = tst->arch.vex.guest_RSI;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RSI,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSI], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_RDI] = tst->arch.vex.guest_RDI;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RDI,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDI], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_R8] = tst->arch.vex.guest_R8;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R8,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R8], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_R9] = tst->arch.vex.guest_R9;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R9,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R9], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_R10] = tst->arch.vex.guest_R10;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R10,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R10], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_R11] = tst->arch.vex.guest_R11;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R11,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R11], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_R12] = tst->arch.vex.guest_R12;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R12,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R12], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_R13] = tst->arch.vex.guest_R13;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R13,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R13], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_R14] = tst->arch.vex.guest_R14;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R14,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R14], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_R15] = tst->arch.vex.guest_R15;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_R15,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_R15], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_RSP] = tst->arch.vex.guest_RSP;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_amd64_RSP,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSP], sizeof(UWord));
+
+   /* ERR and TRAPNO */
+   uc->uc_mcontext.gregs[VKI_REG_ERR] = 0;
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_ERR], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_TRAPNO] = 0;
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_TRAPNO], sizeof(UWord));
+
+   /* Segment registers */
+   /* Valgrind does not support moves from/to segment registers on AMD64.  The
+      values returned below are the ones that are set by the kernel when
+      a program is started. */
+   uc->uc_mcontext.gregs[VKI_REG_CS] = VKI_UCS_SEL;
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_CS], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_DS] = 0;
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_DS], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_SS] = VKI_UDS_SEL;
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_SS], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_ES] = 0;
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_ES], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_FS] = 0;
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_FS], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_GS] = 0;
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_GS], sizeof(UWord));
+
+   /* Segment bases */
+   uc->uc_mcontext.gregs[VKI_REG_FSBASE] = tst->arch.vex.guest_FS_CONST;
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_FSBASE], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_REG_GSBASE] = 0;
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)&uc->uc_mcontext.gregs[VKI_REG_GSBASE], sizeof(UWord));
+
+   /* Handle rflags.  Refer to the x86-solaris variant of this code for
+      a detailed description. */
+   uc->uc_mcontext.gregs[VKI_REG_RFL] =
+      LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
+   VG_TRACK(post_mem_write, part, tid,
+         (Addr)&uc->uc_mcontext.gregs[VKI_REG_RFL], sizeof(UWord));
+   VKI_UC_GUEST_CC_OP(uc) = tst->arch.vex.guest_CC_OP;
+   VKI_UC_GUEST_CC_NDEP(uc) = tst->arch.vex.guest_CC_NDEP;
+   VKI_UC_GUEST_CC_DEP1(uc) = tst->arch.vex.guest_CC_DEP1;
+   VG_TRACK(copy_reg_to_mem, part, tid,
+            offsetof(VexGuestAMD64State, guest_CC_DEP1),
+            (Addr)&VKI_UC_GUEST_CC_DEP1(uc), sizeof(UWord));
+   VKI_UC_GUEST_CC_DEP2(uc) = tst->arch.vex.guest_CC_DEP2;
+   VG_TRACK(copy_reg_to_mem, part, tid,
+            offsetof(VexGuestAMD64State, guest_CC_DEP2),
+            (Addr)&VKI_UC_GUEST_CC_DEP2(uc), sizeof(UWord));
+   VKI_UC_GUEST_RFLAGS_NEG(uc) = ~uc->uc_mcontext.gregs[VKI_REG_RFL];
+   /* Calculate a checksum. */
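+   /* The checksum covers the CC_* thunk values together with the computed
+      rflags value; ML_(restore_machine_context) recomputes it and fully
+      restores the CC_* guest state only if it still matches. */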
+   {
+      ULong buf[5];
+      ULong checksum;
+
+      buf[0] = VKI_UC_GUEST_CC_OP(uc);
+      buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
+      buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
+      buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
+      buf[4] = uc->uc_mcontext.gregs[VKI_REG_RFL];
+      checksum = ML_(fletcher64)((UInt*)&buf, sizeof(buf) / sizeof(UInt));
+      VKI_UC_GUEST_RFLAGS_CHECKSUM(uc) = checksum;
+   }
+
+   /* FPU */
+   /* The fpregset_t structure on amd64 follows the layout that is used by
+      the FXSAVE instruction; therefore it is only necessary to call a VEX
+      function that simulates this instruction. */
+   LibVEX_GuestAMD64_fxsave(&tst->arch.vex, (HWord)fs);
+
+   /* Control word */
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->cw, sizeof(fs->cw));
+   /* Status word */
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->sw, sizeof(fs->sw));
+   /* Compressed tag word */
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->fctw, sizeof(fs->fctw));
+   /* Unused */
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->__fx_rsvd,
+            sizeof(fs->__fx_rsvd));
+   vg_assert(fs->__fx_rsvd == 0);
+   /* Last x87 opcode */
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->fop, sizeof(fs->fop));
+   vg_assert(fs->fop == 0);
+   /* Last x87 instruction pointer */
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->rip, sizeof(fs->rip));
+   vg_assert(fs->rip == 0);
+   /* Last x87 data pointer */
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->rdp, sizeof(fs->rdp));
+   vg_assert(fs->rdp == 0);
+   /* Media-instruction control and status register */
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
+   /* Supported features in MXCSR */
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr_mask,
+            sizeof(fs->mxcsr_mask));
+
+   /* ST registers */
+   for (i = 0; i < 8; i++) {
+      Addr addr = (Addr)&fs->st[i];
+      /* x87 uses 80b FP registers but VEX uses only 64b registers, thus we
+         have to lie here. :< */
+      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
+               guest_FPREG[i]), addr, sizeof(ULong));
+      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
+               guest_FPREG[i]), addr + 8, sizeof(UShort));
+   }
+
+   /* XMM registers */
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
+            guest_YMM0), (Addr)&fs->xmm[0], sizeof(U128));
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
+            guest_YMM1), (Addr)&fs->xmm[1], sizeof(U128));
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
+            guest_YMM2), (Addr)&fs->xmm[2], sizeof(U128));
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
+            guest_YMM3), (Addr)&fs->xmm[3], sizeof(U128));
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
+            guest_YMM4), (Addr)&fs->xmm[4], sizeof(U128));
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
+            guest_YMM5), (Addr)&fs->xmm[5], sizeof(U128));
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
+            guest_YMM6), (Addr)&fs->xmm[6], sizeof(U128));
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestAMD64State,
+            guest_YMM7), (Addr)&fs->xmm[7], sizeof(U128));
+
+   /* Status word (sw) at exception */
+   fs->status = 0;
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->status, sizeof(fs->status));
+
+   /* MXCSR at exception */
+   fs->xstatus = 0;
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->xstatus,
+            sizeof(fs->xstatus));
+}
+
+/* Architecture-specific part of VG_(restore_context). */
+void ML_(restore_machine_context)(ThreadId tid, vki_ucontext_t *uc,
+                                  CorePart part, Bool esp_is_thrptr)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   struct vki_fpchip_state *fs
+      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;
+
+   /* CPU */
+   if (uc->uc_flags & VKI_UC_CPU) {
+      /* Common registers */
+      tst->arch.vex.guest_RIP = uc->uc_mcontext.gregs[VKI_REG_RIP];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RIP], OFFSET_amd64_RIP,
+               sizeof(UWord));
+      tst->arch.vex.guest_RAX = uc->uc_mcontext.gregs[VKI_REG_RAX];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RAX], OFFSET_amd64_RAX,
+               sizeof(UWord));
+      tst->arch.vex.guest_RBX = uc->uc_mcontext.gregs[VKI_REG_RBX];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBX], OFFSET_amd64_RBX,
+               sizeof(UWord));
+      tst->arch.vex.guest_RCX = uc->uc_mcontext.gregs[VKI_REG_RCX];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RCX], OFFSET_amd64_RCX,
+               sizeof(UWord));
+      tst->arch.vex.guest_RDX = uc->uc_mcontext.gregs[VKI_REG_RDX];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDX], OFFSET_amd64_RDX,
+               sizeof(UWord));
+      tst->arch.vex.guest_RBP = uc->uc_mcontext.gregs[VKI_REG_RBP];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RBP], OFFSET_amd64_RBP,
+               sizeof(UWord));
+      tst->arch.vex.guest_RSI = uc->uc_mcontext.gregs[VKI_REG_RSI];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSI], OFFSET_amd64_RSI,
+               sizeof(UWord));
+      tst->arch.vex.guest_RDI = uc->uc_mcontext.gregs[VKI_REG_RDI];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RDI], OFFSET_amd64_RDI,
+               sizeof(UWord));
+      tst->arch.vex.guest_R8 = uc->uc_mcontext.gregs[VKI_REG_R8];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R8], OFFSET_amd64_R8,
+               sizeof(UWord));
+      tst->arch.vex.guest_R9 = uc->uc_mcontext.gregs[VKI_REG_R9];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R9], OFFSET_amd64_R9,
+               sizeof(UWord));
+      tst->arch.vex.guest_R10 = uc->uc_mcontext.gregs[VKI_REG_R10];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R10], OFFSET_amd64_R10,
+               sizeof(UWord));
+      tst->arch.vex.guest_R11 = uc->uc_mcontext.gregs[VKI_REG_R11];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R11], OFFSET_amd64_R11,
+               sizeof(UWord));
+      tst->arch.vex.guest_R12 = uc->uc_mcontext.gregs[VKI_REG_R12];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R12], OFFSET_amd64_R12,
+               sizeof(UWord));
+      tst->arch.vex.guest_R13 = uc->uc_mcontext.gregs[VKI_REG_R13];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R13], OFFSET_amd64_R13,
+               sizeof(UWord));
+      tst->arch.vex.guest_R14 = uc->uc_mcontext.gregs[VKI_REG_R14];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R14], OFFSET_amd64_R14,
+               sizeof(UWord));
+      tst->arch.vex.guest_R15 = uc->uc_mcontext.gregs[VKI_REG_R15];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_R15], OFFSET_amd64_R15,
+               sizeof(UWord));
+      tst->arch.vex.guest_RSP = uc->uc_mcontext.gregs[VKI_REG_RSP];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_RSP], OFFSET_amd64_RSP,
+               sizeof(UWord));
+
+      /* Ignore ERR and TRAPNO. */
+
+      /* Ignore segment registers. */
+
+      /* Segment bases */
+      tst->arch.vex.guest_FS_CONST = uc->uc_mcontext.gregs[VKI_REG_FSBASE];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_REG_FSBASE],
+               offsetof(VexGuestAMD64State, guest_FS_CONST), sizeof(UWord));
+
+      /* Rflags.  Refer to the x86-solaris variant of this code for a detailed
+         description. */
+      {
+         ULong rflags;
+         ULong orig_rflags;
+         ULong new_rflags;
+         Bool ok_restore = False;
+
+         VG_TRACK(pre_mem_read, part, tid,
+                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_REG_RFL])",
+                  (Addr)&uc->uc_mcontext.gregs[VKI_REG_RFL], sizeof(UWord));
+         rflags = uc->uc_mcontext.gregs[VKI_REG_RFL];
+         orig_rflags = LibVEX_GuestAMD64_get_rflags(&tst->arch.vex);
+         new_rflags = rflags;
+         /* The kernel does not allow the ID flag to be changed via the
+            setcontext call, so do the same. */
+         if (orig_rflags & VKI_RFLAGS_ID_BIT)
+            new_rflags |= VKI_RFLAGS_ID_BIT;
+         else
+            new_rflags &= ~VKI_RFLAGS_ID_BIT;
+         LibVEX_GuestAMD64_put_rflags(new_rflags, &tst->arch.vex);
+         VG_TRACK(post_reg_write, part, tid,
+                  offsetof(VexGuestAMD64State, guest_CC_DEP1), sizeof(UWord));
+         VG_TRACK(post_reg_write, part, tid,
+                  offsetof(VexGuestAMD64State, guest_CC_DEP2), sizeof(UWord));
+
+         if (rflags != ~VKI_UC_GUEST_RFLAGS_NEG(uc)) {
+            VG_(debugLog)(1, "syswrap-solaris",
+                             "The rflags value was restored from an "
+                             "explicitly set value in thread %d.\n", tid);
+            ok_restore = True;
+         }
+         else {
+            ULong buf[5];
+            ULong checksum;
+
+            buf[0] = VKI_UC_GUEST_CC_OP(uc);
+            buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
+            buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
+            buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
+            buf[4] = rflags;
+            checksum = ML_(fletcher64)((UInt*)&buf,
+                                       sizeof(buf) / sizeof(UInt));
+            if (checksum == VKI_UC_GUEST_RFLAGS_CHECKSUM(uc)) {
+               /* Check ok, the full restoration is possible. */
+               VG_(debugLog)(1, "syswrap-solaris",
+                                "The CC_* guest state values were fully "
+                                "restored in thread %d.\n", tid);
+               ok_restore = True;
+
+               tst->arch.vex.guest_CC_OP = VKI_UC_GUEST_CC_OP(uc);
+               tst->arch.vex.guest_CC_NDEP = VKI_UC_GUEST_CC_NDEP(uc);
+               tst->arch.vex.guest_CC_DEP1 = VKI_UC_GUEST_CC_DEP1(uc);
+               VG_TRACK(copy_mem_to_reg, part, tid,
+                        (Addr)&VKI_UC_GUEST_CC_DEP1(uc),
+                        offsetof(VexGuestAMD64State, guest_CC_DEP1),
+                        sizeof(UWord));
+               tst->arch.vex.guest_CC_DEP2 = VKI_UC_GUEST_CC_DEP2(uc);
+               VG_TRACK(copy_mem_to_reg, part, tid,
+                        (Addr)&VKI_UC_GUEST_CC_DEP2(uc),
+                        offsetof(VexGuestAMD64State, guest_CC_DEP2),
+                        sizeof(UWord));
+            }
+         }
+
+         if (!ok_restore)
+            VG_(debugLog)(1, "syswrap-solaris",
+                             "Cannot fully restore the CC_* guest state "
+                             "values, using approximate rflags in thread "
+                             "%d.\n", tid);
+      }
+   }
+
+   if (uc->uc_flags & VKI_UC_FPU) {
+      /* FPU */
+      VexEmNote note;
+      SizeT i;
+
+      /* x87 */
+      /* Control word */
+      VG_TRACK(pre_mem_read, part, tid,
+               "restore_machine_context(uc->uc_mcontext.fpregs..cw)",
+               (Addr)&fs->cw, sizeof(fs->cw));
+      /* Status word */
+      VG_TRACK(pre_mem_read, part, tid,
+               "restore_machine_context(uc->uc_mcontext.fpregs..sw)",
+               (Addr)&fs->sw, sizeof(fs->sw));
+      /* Compressed tag word */
+      VG_TRACK(pre_mem_read, part, tid,
+               "restore_machine_context(uc->uc_mcontext.fpregs..fctw)",
+               (Addr)&fs->fctw, sizeof(fs->fctw));
+      /* Last x87 opcode */
+      VG_TRACK(pre_mem_read, part, tid,
+               "restore_machine_context(uc->uc_mcontext.fpregs..fop)",
+               (Addr)&fs->fop, sizeof(fs->fop));
+      /* Last x87 instruction pointer */
+      VG_TRACK(pre_mem_read, part, tid,
+               "restore_machine_context(uc->uc_mcontext.fpregs..rip)",
+               (Addr)&fs->rip, sizeof(fs->rip));
+      /* Last x87 data pointer */
+      VG_TRACK(pre_mem_read, part, tid,
+               "restore_machine_context(uc->uc_mcontext.fpregs..rdp)",
+               (Addr)&fs->rdp, sizeof(fs->rdp));
+      /* Media-instruction control and status register */
+      VG_TRACK(pre_mem_read, part, tid,
+               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr)",
+               (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
+      /* Supported features in MXCSR */
+      VG_TRACK(pre_mem_read, part, tid,
+               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr_mask)",
+               (Addr)&fs->mxcsr_mask, sizeof(fs->mxcsr_mask));
+
+      /* ST registers */
+      for (i = 0; i < 8; i++) {
+         Addr addr = (Addr)&fs->st[i];
+         VG_TRACK(copy_mem_to_reg, part, tid, addr,
+                  offsetof(VexGuestAMD64State, guest_FPREG[i]), sizeof(ULong));
+      }
+
+      /* XMM registers */
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[0],
+               offsetof(VexGuestAMD64State, guest_YMM0), sizeof(U128));
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[1],
+               offsetof(VexGuestAMD64State, guest_YMM1), sizeof(U128));
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[2],
+               offsetof(VexGuestAMD64State, guest_YMM2), sizeof(U128));
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[3],
+               offsetof(VexGuestAMD64State, guest_YMM3), sizeof(U128));
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[4],
+               offsetof(VexGuestAMD64State, guest_YMM4), sizeof(U128));
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[5],
+               offsetof(VexGuestAMD64State, guest_YMM5), sizeof(U128));
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[6],
+               offsetof(VexGuestAMD64State, guest_YMM6), sizeof(U128));
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[7],
+               offsetof(VexGuestAMD64State, guest_YMM7), sizeof(U128));
+
+      note = LibVEX_GuestAMD64_fxrstor((HWord)fs, &tst->arch.vex);
+      if (note != EmNote_NONE)
+         VG_(message)(Vg_UserMsg,
+                      "Error restoring FP state in thread %d: %s.\n",
+                      tid, LibVEX_EmNote_string(note));
+   }
+}
+
+
+/* ---------------------------------------------------------------------
+   PRE/POST wrappers for AMD64/Solaris-specific syscalls
+   ------------------------------------------------------------------ */
+
+#define PRE(name)       DEFN_PRE_TEMPLATE(amd64_solaris, name)
+#define POST(name)      DEFN_POST_TEMPLATE(amd64_solaris, name)
+
+/* implementation */
+
+#undef PRE
+#undef POST
+
+#endif // defined(VGP_amd64_solaris)
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_syswrap/syswrap-darwin.c b/coregrind/m_syswrap/syswrap-darwin.c
index f804ade..b957b04 100644
--- a/coregrind/m_syswrap/syswrap-darwin.c
+++ b/coregrind/m_syswrap/syswrap-darwin.c
@@ -3451,7 +3451,7 @@
    } else {
       envp = VG_(env_clone)( (HChar**)ARG5 );
       vg_assert(envp);
-      VG_(env_remove_valgrind_env_stuff)( envp );
+      VG_(env_remove_valgrind_env_stuff)( envp, /* ro_strings */ False, NULL);
    }
 
    if (trace_this_child) {
diff --git a/coregrind/m_syswrap/syswrap-generic.c b/coregrind/m_syswrap/syswrap-generic.c
index 3e1a826..10dd80a 100644
--- a/coregrind/m_syswrap/syswrap-generic.c
+++ b/coregrind/m_syswrap/syswrap-generic.c
@@ -30,7 +30,7 @@
    The GNU General Public License is contained in the file COPYING.
 */
 
-#if defined(VGO_linux) || defined(VGO_darwin)
+#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 #include "pub_core_basics.h"
 #include "pub_core_vki.h"
@@ -145,7 +145,7 @@
    presented with bogus client addresses.  Is not used for generating
    user-visible errors. */
 
-Bool ML_(safe_to_deref) ( void* start, SizeT size )
+Bool ML_(safe_to_deref) ( const void *start, SizeT size )
 {
    return VG_(am_is_valid_for_client)( (Addr)start, size, VKI_PROT_READ );
 }
@@ -549,8 +549,7 @@
 
 
 /* Note the fact that a file descriptor was just closed. */
-static
-void record_fd_close(Int fd)
+void ML_(record_fd_close)(Int fd)
 {
    OpenFd *i = allocated_fds;
 
@@ -634,6 +633,32 @@
    ML_(record_fd_open_with_given_name)(tid, fd, NULL);
 }
 
+// Return True if a given file descriptor is already recorded.
+Bool ML_(fd_recorded)(Int fd)
+{
+   OpenFd *i = allocated_fds;
+   while (i) {
+      if (i->fd == fd)
+         return True;
+      i = i->next;
+   }
+   return False;
+}
+
+/* The returned string must not be modified or freed. */
+const HChar *ML_(find_fd_recorded_by_fd)(Int fd)
+{
+   OpenFd *i = allocated_fds;
+
+   while (i) {
+      if (i->fd == fd)
+         return i->pathname;
+      i = i->next;
+   }
+
+   return NULL;
+}
+
 static
 HChar *unix_to_name(struct vki_sockaddr_un *sa, UInt len, HChar *name)
 {
@@ -901,6 +926,44 @@
 #elif defined(VGO_darwin)
    init_preopened_fds_without_proc_self_fd();
 
+#elif defined(VGO_solaris)
+   Int ret;
+   Char buf[VKI_MAXGETDENTS_SIZE];
+   SysRes f;
+
+   f = VG_(open)("/proc/self/fd", VKI_O_RDONLY, 0);
+   if (sr_isError(f)) {
+      init_preopened_fds_without_proc_self_fd();
+      return;
+   }
+
+   while ((ret = VG_(getdents64)(sr_Res(f), (struct vki_dirent64 *) buf,
+                                 sizeof(buf))) > 0) {
+      Int i = 0;
+      while (i < ret) {
+         /* Process one entry. */
+         struct vki_dirent64 *d = (struct vki_dirent64 *) (buf + i);
+         if (VG_(strcmp)(d->d_name, ".") && VG_(strcmp)(d->d_name, "..")) {
+            HChar *s;
+            Int fno = VG_(strtoll10)(d->d_name, &s);
+            if (*s == '\0') {
+               if (fno != sr_Res(f))
+                  if (VG_(clo_track_fds))
+                     ML_(record_fd_open_named)(-1, fno);
+            } else {
+               VG_(message)(Vg_DebugMsg,
+                     "Warning: invalid file name in /proc/self/fd: %s\n",
+                     d->d_name);
+            }
+         }
+
+         /* Move on to the next entry. */
+         i += d->d_reclen;
+      }
+   }
+
+   VG_(close)(sr_Res(f));
+
 #else
 #  error Unknown OS
 #endif
@@ -1046,7 +1109,7 @@
                              struct vki_sockaddr *sa, UInt salen )
 {
    HChar *outmsg;
-   struct vki_sockaddr_un*  sun  = (struct vki_sockaddr_un *)sa;
+   struct vki_sockaddr_un*  saun = (struct vki_sockaddr_un *)sa;
    struct vki_sockaddr_in*  sin  = (struct vki_sockaddr_in *)sa;
    struct vki_sockaddr_in6* sin6 = (struct vki_sockaddr_in6 *)sa;
 #  ifdef VKI_AF_BLUETOOTH
@@ -1069,7 +1132,7 @@
                   
       case VKI_AF_UNIX:
          VG_(sprintf) ( outmsg, description, "sun_path" );
-         PRE_MEM_RASCIIZ( outmsg, (Addr) sun->sun_path );
+         PRE_MEM_RASCIIZ( outmsg, (Addr) saun->sun_path );
          // GrP fixme max of sun_len-2? what about nul char?
          break;
                      
@@ -1717,8 +1780,11 @@
 
    arg.buf = &buf;
 
-#  ifdef __NR_semctl
+#  if defined(__NR_semctl)
    res = VG_(do_syscall4)(__NR_semctl, semid, 0, VKI_IPC_STAT, *(UWord *)&arg);
+#  elif defined(__NR_semsys) /* Solaris */
+   res = VG_(do_syscall5)(__NR_semsys, VKI_SEMCTL, semid, 0, VKI_IPC_STAT,
+                          *(UWord *)&arg);
 #  else
    res = VG_(do_syscall5)(__NR_ipc, 3 /* IPCOP_semctl */, semid, 0,
                           VKI_IPC_STAT, (UWord)&arg);
@@ -1761,6 +1827,11 @@
 #if defined(VKI_SEM_STAT)
    case VKI_SEM_STAT|VKI_IPC_64:
 #endif
+#endif
+#if defined(VKI_IPC_STAT64)
+   case VKI_IPC_STAT64:
+#endif
+#if defined(VKI_IPC_64) || defined(VKI_IPC_STAT64)
       PRE_MEM_WRITE( "semctl(IPC_STAT, arg.buf)",
                      (Addr)arg.buf, sizeof(struct vki_semid64_ds) );
       break;
@@ -1773,6 +1844,11 @@
 
 #if defined(VKI_IPC_64)
    case VKI_IPC_SET|VKI_IPC_64:
+#endif
+#if defined(VKI_IPC_SET64)
+   case VKI_IPC_SET64:
+#endif
+#if defined(VKI_IPC_64) || defined(VKI_IPC_SET64)
       PRE_MEM_READ( "semctl(IPC_SET, arg.buf)",
                     (Addr)arg.buf, sizeof(struct vki_semid64_ds) );
       break;
@@ -1826,6 +1902,11 @@
 #if defined(VKI_IPC_64)
    case VKI_IPC_STAT|VKI_IPC_64:
    case VKI_SEM_STAT|VKI_IPC_64:
+#endif
+#if defined(VKI_IPC_STAT64)
+   case VKI_IPC_STAT64:
+#endif
+#if defined(VKI_IPC_64) || defined(VKI_IPC_STAT64)
       POST_MEM_WRITE( (Addr)arg.buf, sizeof(struct vki_semid64_ds) );
       break;
 #endif
@@ -1847,7 +1928,7 @@
 static
 SizeT get_shm_size ( Int shmid )
 {
-#ifdef __NR_shmctl
+#if defined(__NR_shmctl)
 #  ifdef VKI_IPC_64
    struct vki_shmid64_ds buf;
 #    if defined(VGP_amd64_linux) || defined(VGP_arm64_linux)
@@ -1862,6 +1943,10 @@
    struct vki_shmid_ds buf;
    SysRes __res = VG_(do_syscall3)(__NR_shmctl, shmid, VKI_IPC_STAT, (UWord)&buf);
 #  endif /* def VKI_IPC_64 */
+#elif defined(__NR_shmsys) /* Solaris */
+   struct vki_shmid_ds buf;
+   SysRes __res = VG_(do_syscall4)(__NR_shmsys, VKI_SHMCTL, shmid, VKI_IPC_STAT,
+                         (UWord)&buf);
 #else
    struct vki_shmid_ds buf;
    SysRes __res = VG_(do_syscall5)(__NR_ipc, 24 /* IPCOP_shmctl */, shmid,
@@ -2157,6 +2242,16 @@
    if (arg4 & VKI_MAP_FIXED) {
       mreq.rkind = MFixed;
    } else
+#if defined(VKI_MAP_ALIGN) /* Solaris specific */
+   if (arg4 & VKI_MAP_ALIGN) {
+      mreq.rkind = MAlign;
+      if (mreq.start == 0) {
+         mreq.start = VKI_PAGE_SIZE;
+      }
+      /* VKI_MAP_FIXED and VKI_MAP_ALIGN don't like each other. */
+      arg4 &= ~VKI_MAP_ALIGN;
+   } else
+#endif
    if (arg1 != 0) {
       mreq.rkind = MHint;
    } else {
@@ -2678,7 +2773,7 @@
 }
 
 // Pre_read a char** argument.
-static void pre_argv_envp(Addr a, ThreadId tid, const HChar* s1, const HChar* s2)
+void ML_(pre_argv_envp)(Addr a, ThreadId tid, const HChar *s1, const HChar *s2)
 {
    while (True) {
       Addr a_deref;
@@ -2729,9 +2824,9 @@
                  char *, filename, char **, argv, char **, envp);
    PRE_MEM_RASCIIZ( "execve(filename)", ARG1 );
    if (ARG2 != 0)
-      pre_argv_envp( ARG2, tid, "execve(argv)", "execve(argv[i])" );
+      ML_(pre_argv_envp)( ARG2, tid, "execve(argv)", "execve(argv[i])" );
    if (ARG3 != 0)
-      pre_argv_envp( ARG3, tid, "execve(envp)", "execve(envp[i])" );
+      ML_(pre_argv_envp)( ARG3, tid, "execve(envp)", "execve(envp[i])" );
 
    vg_assert(VG_(is_valid_tid)(tid));
    tst = VG_(get_ThreadState)(tid);
@@ -2850,7 +2945,7 @@
    } else {
       envp = VG_(env_clone)( (HChar**)ARG3 );
       if (envp == NULL) goto hosed;
-      VG_(env_remove_valgrind_env_stuff)( envp );
+      VG_(env_remove_valgrind_env_stuff)( envp, True /*ro_strings*/, NULL );
    }
 
    if (trace_this_child) {
@@ -3089,7 +3184,7 @@
 
 POST(sys_close)
 {
-   if (VG_(clo_track_fds)) record_fd_close(ARG1);
+   if (VG_(clo_track_fds)) ML_(record_fd_close)(ARG1);
 }
 
 PRE(sys_dup)
@@ -3160,6 +3255,7 @@
    POST_MEM_WRITE( ARG2, sizeof(struct vki_stat) );
 }
 
+#if !defined(VGO_solaris)
 static vki_sigset_t fork_saved_mask;
 
 // In Linux, the sys_fork() function varies across architectures, but we
@@ -3178,6 +3274,8 @@
    VG_(sigfillset)(&mask);
    VG_(sigprocmask)(VKI_SIG_SETMASK, &mask, &fork_saved_mask);
 
+   VG_(do_atfork_pre)(tid);
+
    SET_STATUS_from_SysRes( VG_(do_syscall0)(__NR_fork) );
 
    if (!SUCCESS) return;
@@ -3194,8 +3292,6 @@
 #  error Unknown OS
 #endif
 
-   VG_(do_atfork_pre)(tid);
-
    if (is_child) {
       VG_(do_atfork_child)(tid);
 
@@ -3222,6 +3318,7 @@
       VG_(sigprocmask)(VKI_SIG_SETMASK, &fork_saved_mask, NULL);
    }
 }
+#endif // !defined(VGO_solaris)
 
 PRE(sys_ftruncate)
 {
@@ -3499,9 +3596,15 @@
       According to Simon Hausmann, _IOC_READ means the kernel
       writes a value to the ioctl value passed from the user
       space and the other way around with _IOC_WRITE. */
-   
+
+#if defined(VGO_solaris)
+   /* The majority of Solaris ioctl requests do not honour direction hints. */
+   UInt dir  = _VKI_IOC_NONE;
+#else
    UInt dir  = _VKI_IOC_DIR(request);
+#endif
    UInt size = _VKI_IOC_SIZE(request);
+
    if (SimHintiS(SimHint_lax_ioctls, VG_(clo_sim_hints))) {
       /* 
        * Be very lax about ioctl handling; the only
@@ -3620,7 +3723,7 @@
 PRE(sys_kill)
 {
    PRINT("sys_kill ( %ld, %ld )", ARG1,ARG2);
-   PRE_REG_READ2(long, "kill", int, pid, int, sig);
+   PRE_REG_READ2(long, "kill", int, pid, int, signal);
    if (!ML_(client_signal_OK)(ARG2)) {
       SET_STATUS_Failure( VKI_EINVAL );
       return;
@@ -3799,6 +3902,49 @@
       POST_MEM_WRITE( ARG2, sizeof(struct vki_timespec) );
 }
 
+#if defined(VGO_linux) || defined(VGO_solaris)
+/* Handles the case where the open is of /proc/self/auxv or
+   /proc/<pid>/auxv, and just gives out a copy of the fd for the
+   fake file we cooked up at startup (in m_main).  Also, seeks the
+   cloned fd back to the start.
+   Returns True if auxv open was handled (status is set). */
+Bool ML_(handle_auxv_open)(SyscallStatus *status, const HChar *filename,
+                           int flags)
+{
+   HChar  name[30];   // large enough
+
+   if (!ML_(safe_to_deref)((const void *) filename, 1))
+      return False;
+
+   /* Opening /proc/<pid>/auxv or /proc/self/auxv? */
+   VG_(sprintf)(name, "/proc/%d/auxv", VG_(getpid)());
+   if (!VG_STREQ(filename, name) && !VG_STREQ(filename, "/proc/self/auxv"))
+      return False;
+
+   /* Allow opening the file only for reading. */
+   if (flags & (VKI_O_WRONLY | VKI_O_RDWR)) {
+      SET_STATUS_Failure(VKI_EACCES);
+      return True;
+   }
+
+#  if defined(VGO_solaris)
+   VG_(sprintf)(name, "/proc/self/fd/%d", VG_(cl_auxv_fd));
+   SysRes sres = VG_(open)(name, flags, 0);
+   SET_STATUS_from_SysRes(sres);
+#  else
+   SysRes sres = VG_(dup)(VG_(cl_auxv_fd));
+   SET_STATUS_from_SysRes(sres);
+   if (!sr_isError(sres)) {
+      OffT off = VG_(lseek)(sr_Res(sres), 0, VKI_SEEK_SET);
+      if (off < 0)
+         SET_STATUS_Failure(VKI_EMFILE);
+   }
+#  endif
+
+   return True;
+}
+#endif // defined(VGO_linux) || defined(VGO_solaris)
+
 PRE(sys_open)
 {
    if (ARG2 & VKI_O_CREAT) {
@@ -3840,30 +3986,9 @@
       }
    }
 
-   /* Handle the case where the open is of /proc/self/auxv or
-      /proc/<pid>/auxv, and just give it a copy of the fd for the
-      fake file we cooked up at startup (in m_main).  Also, seek the
-      cloned fd back to the start. */
-   {
-      HChar  name[30];   // large enough
-      HChar* arg1s = (HChar*) ARG1;
-      SysRes sres;
-
-      VG_(sprintf)(name, "/proc/%d/auxv", VG_(getpid)());
-      if (ML_(safe_to_deref)( arg1s, 1 ) &&
-          (VG_STREQ(arg1s, name) || VG_STREQ(arg1s, "/proc/self/auxv"))
-         )
-      {
-         sres = VG_(dup)( VG_(cl_auxv_fd) );
-         SET_STATUS_from_SysRes( sres );
-         if (!sr_isError(sres)) {
-            OffT off = VG_(lseek)( sr_Res(sres), 0, VKI_SEEK_SET );
-            if (off < 0)
-               SET_STATUS_Failure( VKI_EMFILE );
-         }
-         return;
-      }
-   }
+   /* Also handle the case of /proc/self/auxv or /proc/<pid>/auxv. */
+   if (ML_(handle_auxv_open)(status, (const HChar *)ARG1, ARG2))
+      return;
 #endif // defined(VGO_linux)
 
    /* Otherwise handle normally */
@@ -3914,6 +4039,11 @@
    if (!ok && ARG1 == 2/*stderr*/ 
            && SimHintiS(SimHint_enable_outer, VG_(clo_sim_hints)))
       ok = True;
+#if defined(VGO_solaris)
+   if (!ok && VG_(vfork_fildes_addr) != NULL &&
+       *VG_(vfork_fildes_addr) >= 0 && *VG_(vfork_fildes_addr) == ARG1)
+      ok = True;
+#endif
    if (!ok)
       SET_STATUS_Failure( VKI_EBADF );
    else
@@ -4004,7 +4134,21 @@
          SET_STATUS_from_SysRes( VG_(do_syscall3)(saved, (UWord)name, 
                                                          ARG2, ARG3));
       } else
-#endif // defined(VGO_linux)
+#elif defined(VGO_solaris)
+      /* Same for Solaris, but /proc/self/path/a.out and
+         /proc/<pid>/path/a.out. */
+      HChar  name[30];   // large enough
+      HChar* arg1s = (HChar*) ARG1;
+      VG_(sprintf)(name, "/proc/%d/path/a.out", VG_(getpid)());
+      if (ML_(safe_to_deref)(arg1s, 1) &&
+          (VG_STREQ(arg1s, name) || VG_STREQ(arg1s, "/proc/self/path/a.out"))
+         )
+      {
+         VG_(sprintf)(name, "/proc/self/path/%d", VG_(cl_exec_fd));
+         SET_STATUS_from_SysRes( VG_(do_syscall3)(saved, (UWord)name,
+                                                         ARG2, ARG3));
+      } else
+#endif
       {
          /* Normal case */
          SET_STATUS_from_SysRes( VG_(do_syscall3)(saved, ARG1, ARG2, ARG3));
@@ -4183,7 +4327,15 @@
          SET_STATUS_Failure( VKI_EPERM );
       }
       else {
-         VG_(threads)[tid].client_stack_szB = ((struct vki_rlimit *)ARG2)->rlim_cur;
+         /* Change the value of client_stack_szB to the rlim_cur value, but
+            only if it is no larger than the size of the stack allocated for
+            the client.
+            TODO: All platforms should set VG_(clstk_max_size) as part of their
+                  setup_client_stack(). */
+         if ((VG_(clstk_max_size) == 0)
+             || (((struct vki_rlimit *) ARG2)->rlim_cur <= VG_(clstk_max_size)))
+            VG_(threads)[tid].client_stack_szB = ((struct vki_rlimit *)ARG2)->rlim_cur;
+
          VG_(client_rlimit_stack) = *(struct vki_rlimit *)ARG2;
          SET_STATUS_Success( 0 );
       }
@@ -4411,6 +4563,16 @@
       PRE_MEM_WRITE( "sigaltstack(oss)", ARG2, sizeof(vki_stack_t) );
    }
 
+   /* Be safe. */
+   if (ARG1 && !ML_(safe_to_deref)((void*)ARG1, sizeof(vki_stack_t))) {
+      SET_STATUS_Failure(VKI_EFAULT);
+      return;
+   }
+   if (ARG2 && !ML_(safe_to_deref)((void*)ARG2, sizeof(vki_stack_t))) {
+      SET_STATUS_Failure(VKI_EFAULT);
+      return;
+   }
+
    SET_STATUS_from_SysRes( 
       VG_(do_sys_sigaltstack) (tid, (vki_stack_t*)ARG1, 
                               (vki_stack_t*)ARG2)
@@ -4433,7 +4595,7 @@
 #undef PRE
 #undef POST
 
-#endif // defined(VGO_linux) || defined(VGO_darwin)
+#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 /*--------------------------------------------------------------------*/
 /*--- end                                                          ---*/
diff --git a/coregrind/m_syswrap/syswrap-main.c b/coregrind/m_syswrap/syswrap-main.c
index 71ef2b7..0479400 100644
--- a/coregrind/m_syswrap/syswrap-main.c
+++ b/coregrind/m_syswrap/syswrap-main.c
@@ -89,6 +89,14 @@
    amd64-darwin.  Apparently 0(%esp) is some kind of return address
    (perhaps for syscalls done with "sysenter"?)  I don't think it is
    relevant for syscalls done with "int $0x80/1/2".
+
+   SOLARIS:
+   x86    eax +4   +8   +12  +16  +20  +24  +28  +32  edx:eax, eflags.c
+   amd64  rax rdi  rsi  rdx  r10  r8   r9   +8   +16  rdx:rax, rflags.c
+
+   "+N" denotes "in memory at N(%esp)". Solaris also supports fasttrap
+   syscalls. Fasttraps do not take any parameters (except for the sysno in eax)
+   and never fail (if the sysno is valid).
 */
 
 /* This is the top level of the system-call handler module.  All
@@ -172,6 +180,11 @@
      s390x:  Success(N) ==>  r2 = N
              Fail(N)    ==>  r2 = -N
 
+     Solaris:
+     x86:    Success(N) ==>  edx:eax = N, cc = 0
+             Fail(N)    ==>      eax = N, cc = 1
+     Same applies for fasttraps except they never fail.
+
    * The post wrapper is called if:
 
      - it exists, and
@@ -297,6 +310,18 @@
                                            const vki_sigset_t *syscall_mask,
                                            const vki_sigset_t *restore_mask,
                                            Word sigsetSzB ); /* unused */
+#elif defined(VGO_solaris)
+extern
+UWord ML_(do_syscall_for_client_WRK)( Word syscallno,
+                                      void* guest_state,
+                                      const vki_sigset_t *syscall_mask,
+                                      const vki_sigset_t *restore_mask,
+                                      UChar *cflag);
+UWord ML_(do_syscall_for_client_dret_WRK)( Word syscallno,
+                                           void* guest_state,
+                                           const vki_sigset_t *syscall_mask,
+                                           const vki_sigset_t *restore_mask,
+                                           UChar *cflag);
 #else
 #  error "Unknown OS"
 #endif
@@ -339,6 +364,35 @@
          /*NOTREACHED*/
          break;
    }
+#  elif defined(VGO_solaris)
+   UChar cflag;
+
+   /* Fasttraps or anything else cannot go through this path. */
+   vg_assert(VG_SOLARIS_SYSNO_CLASS(syscallno)
+             == VG_SOLARIS_SYSCALL_CLASS_CLASSIC);
+
+   /* If the syscall is a door_return call then it has to be handled very
+      differently. */
+   if (tst->os_state.in_door_return)
+      err = ML_(do_syscall_for_client_dret_WRK)(
+                syscallno, &tst->arch.vex,
+                syscall_mask, &saved, &cflag
+            );
+   else
+      err = ML_(do_syscall_for_client_WRK)(
+                syscallno, &tst->arch.vex,
+                syscall_mask, &saved, &cflag
+            );
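+   /* err is zero on success, or (0x8000 | error code) if one of the
+      sigprocmask calls inside the wrapper failed (see the sigprocmask_failed
+      path in the syscall-*-solaris.S wrappers). */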
+
+   /* Save the carry flag. */
+#  if defined(VGP_x86_solaris)
+   LibVEX_GuestX86_put_eflag_c(cflag, &tst->arch.vex);
+#  elif defined(VGP_amd64_solaris)
+   LibVEX_GuestAMD64_put_rflag_c(cflag, &tst->arch.vex);
+#  else
+#    error "Unknown platform"
+#  endif
+
 #  else
 #    error "Unknown OS"
 #  endif
@@ -662,6 +716,69 @@
    canonical->arg7  = 0;
    canonical->arg8  = 0;
 
+#elif defined(VGP_x86_solaris)
+   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
+   UWord *stack = (UWord *)gst->guest_ESP;
+   canonical->sysno = gst->guest_EAX;
+   /* stack[0] is a return address. */
+   canonical->arg1  = stack[1];
+   canonical->arg2  = stack[2];
+   canonical->arg3  = stack[3];
+   canonical->arg4  = stack[4];
+   canonical->arg5  = stack[5];
+   canonical->arg6  = stack[6];
+   canonical->arg7  = stack[7];
+   canonical->arg8  = stack[8];
+
+   switch (trc) {
+   case VEX_TRC_JMP_SYS_INT145:
+   case VEX_TRC_JMP_SYS_SYSENTER:
+   case VEX_TRC_JMP_SYS_SYSCALL:
+   /* These three are not actually valid syscall instructions on Solaris.
+      Pretend for now that we handle them as normal syscalls. */
+   case VEX_TRC_JMP_SYS_INT128:
+   case VEX_TRC_JMP_SYS_INT129:
+   case VEX_TRC_JMP_SYS_INT130:
+      /* int $0x91, sysenter, syscall = normal syscall */
+      break;
+   case VEX_TRC_JMP_SYS_INT210:
+      /* int $0xD2 = fasttrap */
+      canonical->sysno
+         = VG_SOLARIS_SYSCALL_CONSTRUCT_FASTTRAP(canonical->sysno);
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+
+#elif defined(VGP_amd64_solaris)
+   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
+   UWord *stack = (UWord *)gst->guest_RSP;
+   canonical->sysno = gst->guest_RAX;
+   /* stack[0] is a return address. */
+   canonical->arg1 = gst->guest_RDI;
+   canonical->arg2 = gst->guest_RSI;
+   canonical->arg3 = gst->guest_RDX;
+   canonical->arg4 = gst->guest_R10;  /* Not RCX with syscall. */
+   canonical->arg5 = gst->guest_R8;
+   canonical->arg6 = gst->guest_R9;
+   canonical->arg7 = stack[1];
+   canonical->arg8 = stack[2];
+
+   switch (trc) {
+   case VEX_TRC_JMP_SYS_SYSCALL:
+      /* syscall = normal syscall */
+      break;
+   case VEX_TRC_JMP_SYS_INT210:
+      /* int $0xD2 = fasttrap */
+      canonical->sysno
+         = VG_SOLARIS_SYSCALL_CONSTRUCT_FASTTRAP(canonical->sysno);
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+
 #else
 #  error "getSyscallArgsFromGuestState: unknown arch"
 #endif
@@ -817,6 +934,42 @@
    gst->guest_r4 = canonical->arg5;
    gst->guest_r5 = canonical->arg6;
 
+#elif defined(VGP_x86_solaris)
+   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
+   UWord *stack = (UWord *)gst->guest_ESP;
+
+   /* Fasttraps or anything else cannot go through this path. */
+   vg_assert(VG_SOLARIS_SYSNO_CLASS(canonical->sysno)
+             == VG_SOLARIS_SYSCALL_CLASS_CLASSIC);
+   gst->guest_EAX = canonical->sysno;
+   /* stack[0] is a return address. */
+   stack[1] = canonical->arg1;
+   stack[2] = canonical->arg2;
+   stack[3] = canonical->arg3;
+   stack[4] = canonical->arg4;
+   stack[5] = canonical->arg5;
+   stack[6] = canonical->arg6;
+   stack[7] = canonical->arg7;
+   stack[8] = canonical->arg8;
+
+#elif defined(VGP_amd64_solaris)
+   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
+   UWord *stack = (UWord *)gst->guest_RSP;
+
+   /* Fasttraps or anything else cannot go through this path. */
+   vg_assert(VG_SOLARIS_SYSNO_CLASS(canonical->sysno)
+             == VG_SOLARIS_SYSCALL_CLASS_CLASSIC);
+   gst->guest_RAX = canonical->sysno;
+   /* stack[0] is a return address. */
+   gst->guest_RDI = canonical->arg1;
+   gst->guest_RSI = canonical->arg2;
+   gst->guest_RDX = canonical->arg3;
+   gst->guest_R10 = canonical->arg4;
+   gst->guest_R8  = canonical->arg5;
+   gst->guest_R9  = canonical->arg6;
+   stack[1] = canonical->arg7;
+   stack[2] = canonical->arg8;
+
 #else
 #  error "putSyscallArgsIntoGuestState: unknown arch"
 #endif
@@ -950,6 +1103,24 @@
    canonical->sres = VG_(mk_SysRes_tilegx_linux)( gst->guest_r0 );
    canonical->what = SsComplete;
 
+#  elif defined(VGP_x86_solaris)
+   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
+   UInt carry = 1 & LibVEX_GuestX86_get_eflags(gst);
+
+   canonical->sres = VG_(mk_SysRes_x86_solaris)(carry ? True : False,
+                                                gst->guest_EAX,
+                                                carry ? 0 : gst->guest_EDX);
+   canonical->what = SsComplete;
+
+#  elif defined(VGP_amd64_solaris)
+   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
+   UInt carry = 1 & LibVEX_GuestAMD64_get_rflags(gst);
+
+   canonical->sres = VG_(mk_SysRes_amd64_solaris)(carry ? True : False,
+                                                  gst->guest_RAX,
+                                                  carry ? 0 : gst->guest_RDX);
+   canonical->what = SsComplete;
+
 #  else
 #    error "getSyscallStatusFromGuestState: unknown arch"
 #  endif
@@ -1175,6 +1346,60 @@
       gst->guest_r1 = 0;
    }
 
+#  elif defined(VGP_x86_solaris)
+   VexGuestX86State* gst = (VexGuestX86State*)gst_vanilla;
+   SysRes sres = canonical->sres;
+   vg_assert(canonical->what == SsComplete);
+
+   if (sr_isError(sres)) {
+      gst->guest_EAX = sr_Err(sres);
+      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_EAX,
+               sizeof(UInt));
+      LibVEX_GuestX86_put_eflag_c(1, gst);
+   }
+   else {
+      gst->guest_EAX = sr_Res(sres);
+      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_EAX,
+               sizeof(UInt));
+      gst->guest_EDX = sr_ResHI(sres);
+      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_EDX,
+               sizeof(UInt));
+      LibVEX_GuestX86_put_eflag_c(0, gst);
+   }
+   /* Make CC_DEP1 and CC_DEP2 defined.  This is inaccurate because it makes
+      other eflags defined too (see README.solaris). */
+   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestX86State,
+            guest_CC_DEP1), sizeof(UInt));
+   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestX86State,
+            guest_CC_DEP2), sizeof(UInt));
+
+#  elif defined(VGP_amd64_solaris)
+   VexGuestAMD64State* gst = (VexGuestAMD64State*)gst_vanilla;
+   SysRes sres = canonical->sres;
+   vg_assert(canonical->what == SsComplete);
+
+   if (sr_isError(sres)) {
+      gst->guest_RAX = sr_Err(sres);
+      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_amd64_RAX,
+               sizeof(ULong));
+      LibVEX_GuestAMD64_put_rflag_c(1, gst);
+   }
+   else {
+      gst->guest_RAX = sr_Res(sres);
+      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_amd64_RAX,
+               sizeof(ULong));
+      gst->guest_RDX = sr_ResHI(sres);
+      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_amd64_RDX,
+               sizeof(ULong));
+      LibVEX_GuestAMD64_put_rflag_c(0, gst);
+   }
+   /* Make CC_DEP1 and CC_DEP2 defined.  This is inaccurate because it makes
+      other eflags defined too (see README.solaris). */
+   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestAMD64State,
+            guest_CC_DEP1), sizeof(ULong));
+   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, offsetof(VexGuestAMD64State,
+            guest_CC_DEP2), sizeof(ULong));
+
 #  else
 #    error "putSyscallStatusIntoGuestState: unknown arch"
 #  endif
@@ -1311,6 +1536,7 @@
    layout->o_arg6   = OFFSET_s390x_r7;
    layout->uu_arg7  = -1; /* impossible value */
    layout->uu_arg8  = -1; /* impossible value */
+
 #elif defined(VGP_tilegx_linux)
    layout->o_sysno  = OFFSET_tilegx_r(10);
    layout->o_arg1   = OFFSET_tilegx_r(0);
@@ -1322,6 +1548,29 @@
    layout->uu_arg7  = -1; /* impossible value */
    layout->uu_arg8  = -1; /* impossible value */
 
+#elif defined(VGP_x86_solaris)
+   layout->o_sysno  = OFFSET_x86_EAX;
+   /* Syscall parameters are on the stack. */
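+   /* The s_argN values are offsets (in bytes) from the guest stack pointer;
+      offset 0 is taken by the return address pushed by the libc syscall
+      stub. */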
+   layout->s_arg1   = sizeof(UWord) * 1;
+   layout->s_arg2   = sizeof(UWord) * 2;
+   layout->s_arg3   = sizeof(UWord) * 3;
+   layout->s_arg4   = sizeof(UWord) * 4;
+   layout->s_arg5   = sizeof(UWord) * 5;
+   layout->s_arg6   = sizeof(UWord) * 6;
+   layout->s_arg7   = sizeof(UWord) * 7;
+   layout->s_arg8   = sizeof(UWord) * 8;
+
+#elif defined(VGP_amd64_solaris)
+   layout->o_sysno  = OFFSET_amd64_RAX;
+   layout->o_arg1   = OFFSET_amd64_RDI;
+   layout->o_arg2   = OFFSET_amd64_RSI;
+   layout->o_arg3   = OFFSET_amd64_RDX;
+   layout->o_arg4   = OFFSET_amd64_R10;
+   layout->o_arg5   = OFFSET_amd64_R8;
+   layout->o_arg6   = OFFSET_amd64_R9;
+   layout->s_arg7   = sizeof(UWord) * 1;
+   layout->s_arg8   = sizeof(UWord) * 2;
+
 #else
 #  error "getSyscallLayout: unknown arch"
 #endif
@@ -1353,6 +1602,10 @@
    VG_(dmsg)("it at http://valgrind.org/support/bug_reports.html.\n");
 
    SET_STATUS_Failure(VKI_ENOSYS);
+
+#  if defined(VGO_solaris)
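+   /* Rather than let the client continue with a bare ENOSYS result, the
+      Solaris port gives up on unknown syscalls entirely. */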
+   VG_(exit)(1);
+#  endif
 }
 
 static SyscallTableEntry bad_sys =
@@ -1389,6 +1642,9 @@
       break;
    }
 
+#  elif defined(VGO_solaris)
+   sys = ML_(get_solaris_syscall_entry)(syscallno);
+
 #  else
 #    error Unknown OS
 #  endif
@@ -1731,6 +1987,10 @@
             go through anyway, with SfToBlock set, hence we end up here. */
          putSyscallArgsIntoGuestState( &sci->args, &tst->arch.vex );
 
+         /* The SfNoWriteResult flag is invalid for blocking syscalls because
+            do_syscall_for_client() directly modifies the guest state. */
+         vg_assert(!(sci->flags & SfNoWriteResult));
+
          /* Drop the bigLock */
          VG_(release_BigLock)(tid, VgTs_WaitSys, "VG_(client_syscall)[async]");
          /* Urr.  We're now in a race against other threads trying to
@@ -1985,6 +2245,15 @@
   extern const Addr ML_(blksys_complete_UNIX);
   extern const Addr ML_(blksys_committed_UNIX);
   extern const Addr ML_(blksys_finished_UNIX);
+#elif defined(VGO_solaris)
+  extern const Addr ML_(blksys_setup);
+  extern const Addr ML_(blksys_complete);
+  extern const Addr ML_(blksys_committed);
+  extern const Addr ML_(blksys_finished);
+  extern const Addr ML_(blksys_setup_DRET);
+  extern const Addr ML_(blksys_complete_DRET);
+  extern const Addr ML_(blksys_committed_DRET);
+  extern const Addr ML_(blksys_finished_DRET);
 #else
 # error "Unknown OS"
 #endif
@@ -2223,6 +2492,57 @@
       vg_assert(p[0] == 0x286b180051485000ULL);
    }
 
+#elif defined(VGP_x86_solaris)
+   arch->vex.guest_EIP -= 2;   // sizeof(int $0x91) or sizeof(syscall)
+
+   /* Make sure our caller is actually sane, and we're really backing
+      back over a syscall.
+
+      int $0x91 == CD 91
+      syscall   == 0F 05
+      sysenter  == 0F 34
+
+      Handle also other syscall instructions because we also handle them in
+      the scheduler.
+      int $0x80 == CD 80
+      int $0x81 == CD 81
+      int $0x82 == CD 82
+   */
+   {
+      UChar *p = (UChar *)arch->vex.guest_EIP;
+
+      Bool  ok = (p[0] == 0xCD && p[1] == 0x91)
+                  || (p[0] == 0x0F && p[1] == 0x05)
+                  || (p[0] == 0x0F && p[1] == 0x34)
+                  || (p[0] == 0xCD && p[1] == 0x80)
+                  || (p[0] == 0xCD && p[1] == 0x81)
+                  || (p[0] == 0xCD && p[1] == 0x82);
+      if (!ok)
+         VG_(message)(Vg_DebugMsg,
+                      "?! restarting over syscall at %#x %02x %02x\n",
+                      arch->vex.guest_EIP, p[0], p[1]);
+      vg_assert(ok);
+   }
+
+#elif defined(VGP_amd64_solaris)
+   arch->vex.guest_RIP -= 2;   // sizeof(syscall)
+
+   /* Make sure our caller is actually sane, and we're really backing
+      back over a syscall.
+
+      syscall   == 0F 05
+   */
+   {
+      UChar *p = (UChar *)arch->vex.guest_RIP;
+
+      Bool  ok = (p[0] == 0x0F && p[1] == 0x05);
+      if (!ok)
+         VG_(message)(Vg_DebugMsg,
+                      "?! restarting over syscall at %#llx %02x %02x\n",
+                      arch->vex.guest_RIP, p[0], p[1]);
+      vg_assert(ok);
+   }
+
 #else
 #  error "ML_(fixup_guest_state_to_restart_syscall): unknown plat"
 #endif
@@ -2263,7 +2583,8 @@
 VG_(fixup_guest_state_after_syscall_interrupted)( ThreadId tid, 
                                                   Addr     ip, 
                                                   SysRes   sres,
-                                                  Bool     restart)
+                                                  Bool     restart,
+                                                  struct vki_ucontext *uc)
 {
    /* Note that we don't know the syscall number here, since (1) in
       general there's no reliable way to get hold of it short of
@@ -2288,6 +2609,24 @@
         in_complete_to_committed, // [3,4) in the .S files
         in_committed_to_finished; // [4,5) in the .S files
 
+   if (VG_(clo_trace_signals))
+      VG_(message)( Vg_DebugMsg,
+                    "interrupted_syscall: tid=%d, ip=0x%llx, "
+                    "restart=%s, sres.isErr=%s, sres.val=%lld\n",
+                    (Int)tid,
+                    (ULong)ip,
+                    restart ? "True" : "False",
+                    sr_isError(sres) ? "True" : "False",
+                    (Long)(sr_isError(sres) ? sr_Err(sres) : sr_Res(sres)) );
+
+   vg_assert(VG_(is_valid_tid)(tid));
+   vg_assert(tid >= 1 && tid < VG_N_THREADS);
+   vg_assert(VG_(is_running_thread)(tid));
+
+   tst     = VG_(get_ThreadState)(tid);
+   th_regs = &tst->arch;
+   sci     = & syscallInfo[tid];
+
 #  if defined(VGO_linux)
    outside_range
       = ip < ML_(blksys_setup) || ip >= ML_(blksys_finished);
@@ -2321,28 +2660,36 @@
       || (ip >= ML_(blksys_committed_MDEP) && ip < ML_(blksys_finished_MDEP))
       || (ip >= ML_(blksys_committed_UNIX) && ip < ML_(blksys_finished_UNIX));
    /* Wasn't that just So Much Fun?  Does your head hurt yet?  Mine does. */
+#  elif defined(VGO_solaris)
+   /* The Solaris port is never outside the range. */
+   outside_range = False;
+   /* The Solaris kernel never restarts syscalls directly! */
+   at_restart = False;
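+   /* Door server threads block inside door_return(); for those the separate
+      DRET ranges from the syscall-*-solaris.S files apply, as checked
+      below. */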
+   if (tst->os_state.in_door_return) {
+      vg_assert(ip >= ML_(blksys_setup_DRET)
+                && ip < ML_(blksys_finished_DRET));
+
+      in_setup_to_restart
+         = ip >= ML_(blksys_setup_DRET) && ip < ML_(blksys_complete_DRET);
+      in_complete_to_committed
+         = ip >= ML_(blksys_complete_DRET) && ip < ML_(blksys_committed_DRET);
+      in_committed_to_finished
+         = ip >= ML_(blksys_committed_DRET) && ip < ML_(blksys_finished_DRET);
+   }
+   else {
+      vg_assert(ip >= ML_(blksys_setup) && ip < ML_(blksys_finished));
+
+      in_setup_to_restart
+         = ip >= ML_(blksys_setup) && ip < ML_(blksys_complete);
+      in_complete_to_committed
+         = ip >= ML_(blksys_complete) && ip < ML_(blksys_committed);
+      in_committed_to_finished
+         = ip >= ML_(blksys_committed) && ip < ML_(blksys_finished);
+   }
 #  else
 #    error "Unknown OS"
 #  endif
 
-   if (VG_(clo_trace_signals))
-      VG_(message)( Vg_DebugMsg,
-                    "interrupted_syscall: tid=%d, ip=0x%llx, "
-                    "restart=%s, sres.isErr=%s, sres.val=%lld\n", 
-                    (Int)tid,
-                    (ULong)ip, 
-                    restart ? "True" : "False", 
-                    sr_isError(sres) ? "True" : "False",
-                    (Long)(sr_isError(sres) ? sr_Err(sres) : sr_Res(sres)) );
-
-   vg_assert(VG_(is_valid_tid)(tid));
-   vg_assert(tid >= 1 && tid < VG_N_THREADS);
-   vg_assert(VG_(is_running_thread)(tid));
-
-   tst     = VG_(get_ThreadState)(tid);
-   th_regs = &tst->arch;
-   sci     = & syscallInfo[tid];
-
    /* Figure out what the state of the syscall was by examining the
       (real) IP at the time of the signal, and act accordingly. */
    if (outside_range) {
@@ -2372,6 +2719,11 @@
 
    else 
    if (at_restart) {
+#     if defined(VGO_solaris)
+      /* We should never hit this branch on Solaris, see the comment above. */
+      vg_assert(0);
+#     endif
+
       /* We're either about to run the syscall, or it was interrupted
          and the kernel restarted it.  Restart if asked, otherwise
          EINTR it. */
@@ -2401,8 +2753,20 @@
          VG_(message)( Vg_DebugMsg,
                        "  completed, but uncommitted: committing\n");
       canonical = convert_SysRes_to_SyscallStatus( sres );
-      if (!(sci->flags & SfNoWriteResult))
-         putSyscallStatusIntoGuestState( tid, &canonical, &th_regs->vex );
+      vg_assert(!(sci->flags & SfNoWriteResult));
+      putSyscallStatusIntoGuestState( tid, &canonical, &th_regs->vex );
+#     if defined(VGO_solaris)
+      if (tst->os_state.in_door_return) {
+#        if defined(VGP_x86_solaris)
+         /* Registers %esp and %ebp were also modified by the syscall. */
+         tst->arch.vex.guest_ESP = uc->uc_mcontext.gregs[VKI_UESP];
+         tst->arch.vex.guest_EBP = uc->uc_mcontext.gregs[VKI_EBP];
+#        elif defined(VGP_amd64_solaris)
+         tst->arch.vex.guest_RSP = uc->uc_mcontext.gregs[VKI_REG_RSP];
+         tst->arch.vex.guest_RBP = uc->uc_mcontext.gregs[VKI_REG_RBP];
+#        endif
+      }
+#     endif
       sci->status = canonical;
       VG_(post_syscall)(tid);
    } 
@@ -2415,6 +2779,13 @@
       if (VG_(clo_trace_signals))
          VG_(message)( Vg_DebugMsg,
                        "  completed and committed: nothing to do\n");
+#     if defined(VGP_x86_solaris)
+      /* The %eax and %edx values are committed but the carry flag is still
+         uncommitted.  Save it now. */
+      LibVEX_GuestX86_put_eflag_c(sr_isError(sres), &th_regs->vex);
+#     elif defined(VGP_amd64_solaris)
+      LibVEX_GuestAMD64_put_rflag_c(sr_isError(sres), &th_regs->vex);
+#     endif
       getSyscallStatusFromGuestState( &sci->status, &th_regs->vex );
       vg_assert(sci->status.what == SsComplete);
       VG_(post_syscall)(tid);
@@ -2431,6 +2802,21 @@
 }
 
 
+#if defined(VGO_solaris)
+/* Returns True if ip is inside fixable syscall code in syscall-*-*.S.  This
+   function can be called by a 'non-running' thread! */
+Bool VG_(is_ip_in_blocking_syscall)(ThreadId tid, Addr ip)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+
+   if (tst->os_state.in_door_return)
+      return ip >= ML_(blksys_setup_DRET) && ip < ML_(blksys_finished_DRET);
+   else
+      return ip >= ML_(blksys_setup) && ip < ML_(blksys_finished);
+}
+#endif
+
+
 #if defined(VGO_darwin)
 // Clean up after workq_ops(WQOPS_THREAD_RETURN) jumped to wqthread_hijack. 
 // This is similar to VG_(fixup_guest_state_after_syscall_interrupted).
diff --git a/coregrind/m_syswrap/syswrap-solaris.c b/coregrind/m_syswrap/syswrap-solaris.c
new file mode 100644
index 0000000..9a3e04d
--- /dev/null
+++ b/coregrind/m_syswrap/syswrap-solaris.c
@@ -0,0 +1,9978 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Solaris-specific syscalls, etc.            syswrap-solaris.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2011-2015 Petr Pavlu
+      setup@dagobah.cz
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* Copyright 2013-2015, Ivo Raisr <ivosh@ivosh.net>. */
+
+/* Copyright 2013, OmniTI Computer Consulting, Inc. All rights reserved. */
+
+#if defined(VGO_solaris)
+
+#include "libvex_guest_offsets.h"
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_vkiscnums.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_debuginfo.h"         // VG_(di_notify_*)
+#include "pub_core_debuglog.h"
+#include "pub_core_clientstate.h"
+#include "pub_core_gdbserver.h"
+#include "pub_core_inner.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcfile.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcproc.h"
+#include "pub_core_libcsignal.h"
+#include "pub_core_machine.h"           // VG_(get_SP)
+#include "pub_core_mallocfree.h"
+#include "pub_core_options.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_transtab.h"          // VG_(discard_translations)
+#include "pub_core_scheduler.h"
+#include "pub_core_sigframe.h"
+#include "pub_core_signals.h"
+#include "pub_core_stacks.h"
+#include "pub_core_syscall.h"
+#include "pub_core_syswrap.h"
+#include "pub_core_ume.h"
+#if defined(ENABLE_INNER_CLIENT_REQUEST)
+#include "pub_core_clreq.h"
+#endif
+
+#include "priv_types_n_macros.h"
+#include "priv_syswrap-generic.h"
+#include "priv_syswrap-solaris.h"
+
+/* Return the number of live (non-exited) threads of the requested kind.
+   count_daemon == True:  count daemon threads
+   count_daemon == False: count non-daemon threads */
+static UInt count_living_daemon_threads(Bool count_daemon)
+{
+   UInt count = 0;
+   for (ThreadId tid = 1; tid < VG_N_THREADS; tid++)
+      if (VG_(threads)[tid].status != VgTs_Empty &&
+         VG_(threads)[tid].status != VgTs_Zombie &&
+         VG_(threads)[tid].os_state.daemon_thread == count_daemon)
+         count++;
+
+   return count;
+}
+
+/* Note: The following functions (thread_wrapper, run_a_thread_NORETURN,
+   ML_(start_thread_NORETURN), ML_(allocstack) and
+   VG_(main_thread_wrapper_NORETURN)) are based on the code in
+   syswrap-linux.c.  Keep them synchronized! */
+
+/* Run a thread from beginning to end and return the thread's
+   scheduler-return-code. */
+static VgSchedReturnCode thread_wrapper(Word /*ThreadId*/ tidW)
+{
+   VgSchedReturnCode ret;
+   ThreadId tid = (ThreadId)tidW;
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+
+   VG_(debugLog)(1, "syswrap-solaris",
+                    "thread_wrapper(tid=%lld): entry\n",
+                    (ULong)tidW);
+
+   vg_assert(tst->status == VgTs_Init);
+
+   /* Make sure we get the CPU lock before doing anything significant. */
+   VG_(acquire_BigLock)(tid, "thread_wrapper(starting new thread)");
+
+   if (0)
+      VG_(printf)("thread tid %d started: stack = %p\n", tid, &tid);
+
+   /* Make sure error reporting is enabled in the new thread. */
+   tst->err_disablement_level = 0;
+
+   if (tid == 1)
+      VG_TRACK(pre_thread_first_insn, tid);
+   else {
+      /* For newly created threads, VG_TRACK(pre_thread_first_insn, tid) is
+         invoked later from PRE(sys_getsetcontext)() when setucontext()
+         called from _thrp_setup() concludes the new thread setup.  Invoking
+         it here would be far too early; the new thread has no stack yet. */
+   }
+
+   tst->os_state.lwpid = VG_(gettid)();
+   tst->os_state.threadgroup = VG_(getpid)();
+
+   /* Thread created with all signals blocked; scheduler will set the
+      appropriate mask. */
+
+   ret = VG_(scheduler)(tid);
+
+   vg_assert(VG_(is_exiting)(tid));
+
+   vg_assert(tst->status == VgTs_Runnable);
+   vg_assert(VG_(is_running_thread)(tid));
+
+   VG_(debugLog)(1, "syswrap-solaris",
+                    "thread_wrapper(tid=%lld): exit, schedreturncode %s\n",
+                    (ULong)tidW, VG_(name_of_VgSchedReturnCode)(ret));
+
+   /* Return to caller, still holding the lock. */
+   return ret;
+}
+
+/* Run a thread all the way to the end, then do appropriate exit actions
+   (this is the last-one-out-turn-off-the-lights bit). */
+static void run_a_thread_NORETURN(Word tidW)
+{
+   ThreadId tid = (ThreadId)tidW;
+   VgSchedReturnCode src;
+   Int c;
+   ThreadState *tst;
+#ifdef ENABLE_INNER_CLIENT_REQUEST
+   Int registered_vgstack_id;
+#endif
+
+   VG_(debugLog)(1, "syswrap-solaris",
+                    "run_a_thread_NORETURN(tid=%lld): pre-thread_wrapper\n",
+                    (ULong)tidW);
+
+   tst = VG_(get_ThreadState)(tid);
+   vg_assert(tst);
+
+   /* A thread has two stacks:
+      * the simulated stack (used by the synthetic cpu. Guest process
+        is using this stack).
+      * the valgrind stack (used by the real cpu. Valgrind code is running
+        on this stack).
+      When Valgrind runs as an inner, it must signal that its (real) stack
+      is the stack to use by the outer to e.g. do stacktraces.
+   */
+   INNER_REQUEST
+      (registered_vgstack_id
+       = VALGRIND_STACK_REGISTER(tst->os_state.valgrind_stack_base,
+                                 tst->os_state.valgrind_stack_init_SP));
+
+   /* Run the thread all the way through. */
+   src = thread_wrapper(tid);
+
+   VG_(debugLog)(1, "syswrap-solaris",
+                    "run_a_thread_NORETURN(tid=%lld): post-thread_wrapper\n",
+                    (ULong)tidW);
+
+   c = count_living_daemon_threads(False);
+   vg_assert(c >= 1); /* Stay sane. */
+
+   /* Tell the tool that schedctl data belonging to this thread are gone. */
+   Addr a = tst->os_state.schedctl_data;
+   if (a != 0)
+      VG_TRACK(die_mem_munmap, a, sizeof(struct vki_sc_shared));
+
+   /* Deregister thread's stack. */
+   if (tst->os_state.stk_id != (UWord)-1)
+      VG_(deregister_stack)(tst->os_state.stk_id);
+
+   /* Tell the tool this thread is exiting. */
+   VG_TRACK(pre_thread_ll_exit, tid);
+
+   /* If the thread is exiting with errors disabled, complain loudly;
+      doing so is bad (does the user know this has happened?)  Also, in all
+      cases, be paranoid and clear the flag anyway so that the thread slot is
+      safe in this respect if later reallocated.  This should be unnecessary
+      since the flag should be cleared when the slot is reallocated, in
+      thread_wrapper(). */
+   if (tst->err_disablement_level > 0) {
+      VG_(umsg)(
+         "WARNING: exiting thread has error reporting disabled.\n"
+         "WARNING: possibly as a result of some mistake in the use\n"
+         "WARNING: of the VALGRIND_DISABLE_ERROR_REPORTING macros.\n"
+      );
+      VG_(debugLog)(
+         1, "syswrap-solaris",
+            "run_a_thread_NORETURN(tid=%lld): "
+            "WARNING: exiting thread has err_disablement_level = %u\n",
+            (ULong)tidW, tst->err_disablement_level
+      );
+   }
+   tst->err_disablement_level = 0;
+
+   if (c == 1) {
+      UInt daemon_threads = count_living_daemon_threads(True);
+      if (daemon_threads == 0)
+         VG_(debugLog)(1, "syswrap-solaris",
+                          "run_a_thread_NORETURN(tid=%lld): "
+                          "last one standing\n",
+                          (ULong) tidW);
+      else
+         VG_(debugLog)(1, "syswrap-solaris",
+                          "run_a_thread_NORETURN(tid=%lld): "
+                          "last non-daemon thread standing "
+                          "[daemon threads=%u]\n",
+                          (ULong) tidW, daemon_threads);
+
+      /* We are the last non-daemon thread standing. Keep hold of the lock and
+         carry on to show final tool results, then exit the entire system.
+         Use the continuation pointer set at startup in m_main. */
+      if ((src == VgSrc_ExitThread) && (daemon_threads > 0))
+         src = VgSrc_ExitProcess;
+      (*VG_(address_of_m_main_shutdown_actions_NORETURN))(tid, src);
+   }
+   else {
+      VG_(debugLog)(1, "syswrap-solaris",
+                       "run_a_thread_NORETURN(tid=%lld): "
+                       "not last one standing\n",
+                       (ULong)tidW);
+
+      /* OK, thread is dead, but others still exist.  Just exit. */
+
+      /* This releases the run lock. */
+      VG_(exit_thread)(tid);
+      vg_assert(tst->status == VgTs_Zombie);
+      vg_assert(sizeof(tst->status) == 4);
+
+      INNER_REQUEST(VALGRIND_STACK_DEREGISTER(registered_vgstack_id));
+
+      /* We have to use this sequence to terminate the thread to
+         prevent a subtle race.  If VG_(exit_thread)() had left the
+         ThreadState as Empty, then it could have been reallocated, reusing
+         the stack while we're doing these last cleanups.  Instead,
+         VG_(exit_thread) leaves it as Zombie to prevent reallocation.  We
+         need to make sure we don't touch the stack between marking it Empty
+         and exiting.  Hence the assembler. */
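+      /* Solaris enters the kernel via "int $0x91" on x86 and via "syscall"
+         on amd64; the syscall number is passed in %eax / %rax. */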
+#if defined(VGP_x86_solaris)
+      /* Luckily lwp_exit doesn't take any arguments so we don't have to mess
+         with the stack. */
+      __asm__ __volatile__ (
+         "movl  %[EMPTY], %[status]\n"  /* set tst->status = VgTs_Empty */
+         "movl  $"VG_STRINGIFY(__NR_lwp_exit)", %%eax\n"
+         "int   $0x91\n"                /* lwp_exit() */
+         : [status] "=m" (tst->status)
+         : [EMPTY] "n" (VgTs_Empty)
+         : "eax", "edx", "cc", "memory");
+#elif defined(VGP_amd64_solaris)
+      __asm__ __volatile__ (
+         "movl  %[EMPTY], %[status]\n"  /* set tst->status = VgTs_Empty */
+         "movq  $"VG_STRINGIFY(__NR_lwp_exit)", %%rax\n"
+         "syscall\n"                    /* lwp_exit() */
+         : [status] "=m" (tst->status)
+         : [EMPTY] "n" (VgTs_Empty)
+         : "rax", "rdx", "cc", "memory");
+#else
+#  error "Unknown platform"
+#endif
+
+      VG_(core_panic)("Thread exit failed?\n");
+   }
+
+   /*NOTREACHED*/
+   vg_assert(0);
+}
+
+Word ML_(start_thread_NORETURN)(void *arg)
+{
+   ThreadState *tst = (ThreadState*)arg;
+   ThreadId tid = tst->tid;
+
+   run_a_thread_NORETURN((Word)tid);
+   /*NOTREACHED*/
+   vg_assert(0);
+}
+
+/* Allocate a stack for this thread, if it doesn't already have one.
+   They're allocated lazily, and never freed.  Returns the initial stack
+   pointer value to use, or 0 if allocation failed. */
+Addr ML_(allocstack)(ThreadId tid)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   VgStack *stack;
+   Addr initial_SP;
+
+   /* Either the stack_base and stack_init_SP are both zero (in which
+      case a stack hasn't been allocated) or they are both non-zero,
+      in which case it has. */
+
+   if (tst->os_state.valgrind_stack_base == 0)
+      vg_assert(tst->os_state.valgrind_stack_init_SP == 0);
+
+   if (tst->os_state.valgrind_stack_base != 0)
+      vg_assert(tst->os_state.valgrind_stack_init_SP != 0);
+
+   /* If no stack is present, allocate one. */
+
+   if (tst->os_state.valgrind_stack_base == 0) {
+      stack = VG_(am_alloc_VgStack)( &initial_SP );
+      if (stack) {
+         tst->os_state.valgrind_stack_base = (Addr)stack;
+         tst->os_state.valgrind_stack_init_SP = initial_SP;
+      }
+   }
+
+   if (0)
+      VG_(printf)("stack for tid %d at %p; init_SP=%p\n",
+                  tid,
+                  (void*)tst->os_state.valgrind_stack_base,
+                  (void*)tst->os_state.valgrind_stack_init_SP);
+
+   return tst->os_state.valgrind_stack_init_SP;
+}
+
+/* Allocate a stack for the main thread, and run it all the way to the
+   end.  Although we already have a working VgStack (VG_(interim_stack)) it's
+   better to allocate a new one, so that overflow detection works uniformly
+   for all threads.  Also initialize the GDT (for normal threads, this is done
+   in the PRE wrapper of lwp_create). */
+void VG_(main_thread_wrapper_NORETURN)(ThreadId tid)
+{
+   Addr sp;
+
+   VG_(debugLog)(1, "syswrap-solaris",
+                    "entering VG_(main_thread_wrapper_NORETURN)\n");
+
+   sp = ML_(allocstack)(tid);
+#if defined(ENABLE_INNER_CLIENT_REQUEST)
+   {
+      // we must register the main thread stack before the call
+      // to ML_(call_on_new_stack_0_1), otherwise the outer valgrind
+      // reports 'write error' on the non registered stack.
+      ThreadState *tst = VG_(get_ThreadState)(tid);
+      INNER_REQUEST
+         ((void) 
+          VALGRIND_STACK_REGISTER(tst->os_state.valgrind_stack_base,
+                                  tst->os_state.valgrind_stack_init_SP));
+   }
+#endif
+
+#if defined(VGP_x86_solaris)
+   {
+      ThreadState *tst = VG_(get_ThreadState)(tid);
+      ML_(setup_gdt)(&tst->arch.vex);
+      ML_(update_gdt_lwpgs)(tid);
+   }
+#elif defined(VGP_amd64_solaris)
+   /* Nothing to do. */
+#else
+#  error "Unknown platform"
+#endif
+
+   /* If we can't even allocate the first thread's stack, we're hosed.
+      Give up. */
+   vg_assert2(sp != 0, "Cannot allocate main thread's stack.");
+
+   /* Shouldn't be any other threads around yet. */
+   vg_assert(VG_(count_living_threads)() == 1);
+
+   ML_(call_on_new_stack_0_1)(
+      (Addr)sp,               /* stack */
+      0,                      /* bogus return address */
+      run_a_thread_NORETURN,  /* fn to call */
+      (Word)tid               /* arg to give it */
+   );
+
+   /*NOTREACHED*/
+   vg_assert(0);
+}
+
+/* Deallocate the GDT for a thread. */
+void VG_(cleanup_thread)(ThreadArchState *arch)
+{
+#if defined(VGP_x86_solaris)
+   ML_(cleanup_gdt)(&arch->vex);
+#elif defined(VGP_amd64_solaris)
+   /* Nothing to do. */
+#else
+#  error "Unknown platform"
+#endif
+}
+
+/*
+ * Notify the core about spring cleaning of schedctl data pages for all
+ * threads in the child post-fork handler.  Libc will issue new schedctl
+ * syscalls for threads in the child as the need arises.
+ *
+ * See also POST(schedctl) and run_a_thread_NORETURN() when a thread exits.
+ */
+static void clean_schedctl_data(ThreadId tid)
+{
+   UInt i;
+   for (i = 0; i < VG_N_THREADS; i++) {
+      ThreadState *tst = &VG_(threads)[i];
+      if (tst->status != VgTs_Empty) {
+         Addr a = tst->os_state.schedctl_data;
+         if (a != 0) {
+            tst->os_state.schedctl_data = 0;
+            a = VG_PGROUNDDN(a);
+            if (VG_(am_find_anon_segment)(a))
+               VG_(am_notify_munmap)(a, VKI_PAGE_SIZE);
+         }
+      }
+   }
+}
+
+void VG_(syswrap_init)(void)
+{
+   VG_(atfork)(NULL, NULL, clean_schedctl_data);
+}
+
+/* Calculate the Fletcher-32 checksum of a given buffer. */
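+/* Standard Fletcher-32: two running sums reduced modulo 0xffff over a
+   sequence of 16-bit words, with sum2 packed into the upper half of the
+   result.  'blocks' counts 16-bit words, not bytes. */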
+UInt ML_(fletcher32)(UShort *buf, SizeT blocks)
+{
+   UInt sum1 = 0;
+   UInt sum2 = 0;
+   SizeT i;
+
+   for (i = 0; i < blocks; i++) {
+      sum1 = (sum1 + buf[i]) % 0xffff;
+      sum2 = (sum2 + sum1) % 0xffff;
+   }
+
+   return (sum2 << 16) | sum1;
+}
+
+/* Calculate the Fletcher-64 checksum of a given buffer. */
+ULong ML_(fletcher64)(UInt *buf, SizeT blocks)
+{
+   ULong sum1 = 0;
+   ULong sum2 = 0;
+   SizeT i;
+
+   for (i = 0; i < blocks; i++) {
+      sum1 = (sum1 + buf[i]) % 0xffffffff;
+      sum2 = (sum2 + sum1) % 0xffffffff;
+   }
+   return (sum2 << 32) | sum1;
+}
+
+/* Save a complete context (VCPU state, sigmask) of a given client thread
+   into the vki_ucontext_t structure.  This structure is supposed to be
+   allocated in the client memory, a caller must make sure that the memory can
+   be dereferenced.  The active tool is informed about the save. */
+void VG_(save_context)(ThreadId tid, vki_ucontext_t *uc, CorePart part)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+
+   VG_TRACK(pre_mem_write, part, tid, "save_context(uc)", (Addr)uc,
+            sizeof(*uc));
+
+   uc->uc_flags = VKI_UC_ALL;
+   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_flags,
+            sizeof(uc->uc_flags));
+
+   /* Old context */
+   uc->uc_link = tst->os_state.oldcontext;
+   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_link,
+            sizeof(uc->uc_link));
+
+   /* Clear uc->vki_uc_signo.  This slot is used by the signal machinery to
+      store a signal number. */
+   VKI_UC_SIGNO(uc) = 0;
+
+   /* Sigmask */
+   uc->uc_sigmask = tst->sig_mask;
+   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_sigmask,
+            sizeof(uc->uc_sigmask));
+
+   /* Stack */
+   {
+      if (tst->os_state.ustack
+          && ML_(safe_to_deref)(tst->os_state.ustack, sizeof(vki_stack_t))
+          && tst->os_state.ustack->ss_size) {
+         /* If ustack points to a valid stack, copy it to ucontext. */
+         uc->uc_stack = *tst->os_state.ustack;
+      }
+      else {
+         /* Ustack is not valid.  A correct stack has to be figured out
+            manually. */
+         SysRes res;
+         vki_stack_t altstack;
+
+         /* Get information about alternate stack. */
+         res = VG_(do_sys_sigaltstack)(tid, NULL, &altstack);
+         vg_assert(!sr_isError(res));
+
+         if (altstack.ss_flags == VKI_SS_ONSTACK) {
+            /* If the alternate stack is active, copy it to ucontext. */
+            uc->uc_stack = altstack;
+         }
+         else {
+            /* No information about the stack is present; save information
+               about the current main stack to ucontext.  This branch should
+               be reached only by the main thread. */
+            ThreadState *tst2 = VG_(get_ThreadState)(1);
+            uc->uc_stack.ss_sp = (void*)(tst2->client_stack_highest_byte + 1
+                                         - tst2->client_stack_szB);
+            uc->uc_stack.ss_size = tst2->client_stack_szB;
+            uc->uc_stack.ss_flags = 0;
+         }
+      }
+
+      VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_stack,
+               sizeof(uc->uc_stack));
+   }
+
+   /* Save the architecture-specific part of the context. */
+   ML_(save_machine_context)(tid, uc, part);
+}
+
+/* Set a complete context (VCPU state, sigmask) of a given client thread
+   according to values passed in the vki_ucontext_t structure.  This structure
+   is supposed to be allocated in the client memory, a caller must make sure
+   that the memory can be dereferenced.  The active tool is informed about
+   what parts of the structure are read.
+
+   This function is a counterpart to VG_(save_context)(). */
+void VG_(restore_context)(ThreadId tid, vki_ucontext_t *uc, CorePart part,
+                          Bool esp_is_thrptr)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   Addr old_esp = VG_(get_SP)(tid);
+
+   VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_flags)",
+            (Addr)&uc->uc_flags, sizeof(uc->uc_flags));
+
+   /* Old context */
+   VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_link)",
+            (Addr)&uc->uc_link, sizeof(uc->uc_link));
+   tst->os_state.oldcontext = uc->uc_link;
+
+   /* Sigmask */
+   if (uc->uc_flags & VKI_UC_SIGMASK) {
+      SysRes res;
+
+      VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_sigmask)",
+               (Addr)&uc->uc_sigmask, sizeof(uc->uc_sigmask));
+      res = VG_(do_sys_sigprocmask)(tid, VKI_SIG_SETMASK, &uc->uc_sigmask,
+                                    NULL);
+      /* Setting signal mask should never fail. */
+      vg_assert(!sr_isError(res));
+   }
+
+   /* Stack */
+   if (uc->uc_flags & VKI_UC_STACK) {
+      VG_TRACK(pre_mem_read, part, tid, "restore_context(uc->uc_stack)",
+               (Addr)&uc->uc_stack, sizeof(uc->uc_stack));
+
+      if (uc->uc_stack.ss_flags == VKI_SS_ONSTACK) {
+         /* This seems to be a little bit dangerous but it is what the kernel
+            does. */
+         if (VG_(clo_trace_signals))
+            VG_(dmsg)("restore_context, sigaltstack: tid %d, "
+                      "ss %p{%p,sz=%lu,flags=%#x}\n",
+                      tid, &uc->uc_stack, uc->uc_stack.ss_sp,
+                      (SizeT)uc->uc_stack.ss_size, uc->uc_stack.ss_flags);
+
+         tst->altstack.ss_sp = uc->uc_stack.ss_sp;
+         tst->altstack.ss_size = uc->uc_stack.ss_size;
+         /* Do not copy ss_flags, they are calculated dynamically by
+            Valgrind. */
+      }
+
+      /* Copyout the new stack. */
+      if (tst->os_state.ustack
+          && VG_(am_is_valid_for_client)((Addr)tst->os_state.ustack,
+                                         sizeof(*tst->os_state.ustack),
+                                         VKI_PROT_WRITE)) {
+         *tst->os_state.ustack = uc->uc_stack;
+         VG_TRACK(post_mem_write, part, tid, (Addr)tst->os_state.ustack,
+                  sizeof(*tst->os_state.ustack));
+      }
+   }
+
+   /* Restore the architecture-specific part of the context. */
+   ML_(restore_machine_context)(tid, uc, part, esp_is_thrptr);
+
+   /* If the thread stack is already known, kill the deallocated stack area.
+      This is important when returning from a signal handler. */
+   if (tst->client_stack_highest_byte && tst->client_stack_szB) {
+      Addr end = tst->client_stack_highest_byte;
+      Addr start = end + 1 - tst->client_stack_szB;
+      Addr new_esp = VG_(get_SP)(tid);
+
+      /* Make sure that the old and new stack pointer are on the same (active)
+         stack.  Alternate stack is currently never affected by this code. */
+      if (start <= old_esp && old_esp <= end
+          && start <= new_esp && new_esp <= end
+          && new_esp > old_esp)
+         VG_TRACK(die_mem_stack, old_esp - VG_STACK_REDZONE_SZB,
+                  (new_esp - old_esp) + VG_STACK_REDZONE_SZB);
+   }
+}
+
+/* Set a client stack associated with a given thread id according to values
+   passed in the vki_stack_t structure. */
+static void set_stack(ThreadId tid, vki_stack_t *st)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   Addr new_start, new_end;
+   SizeT new_size;
+   Addr cur_start;
+   SizeT cur_size;
+
+   VG_(debugLog)(2, "syswrap-solaris",
+                    "set stack: sp=%#lx, size=%#lx.\n",
+                    (Addr)st->ss_sp, (SizeT)st->ss_size);
+
+   /* Stay sane. */
+   vg_assert(st->ss_flags == 0);
+
+   new_start = (Addr)st->ss_sp;
+   new_end = new_start + st->ss_size - 1;
+   new_size = st->ss_size;
+   cur_start = tst->client_stack_highest_byte + 1
+               - tst->client_stack_szB;
+   cur_size = tst->client_stack_szB;
+
+   if (new_start == cur_start && new_size == cur_size) {
+      /* No change is requested, bail out. */
+      return;
+   }
+
+   if (tid == 1 && (new_size == 0 || new_size > VG_(clstk_max_size))) {
+      /* The main thread requests a stack without any size checking, or a
+         stack that is too big.  Fall back to the maximum allocated client
+         stack. */
+
+      /* TODO I think it is possible to give up on setting main stack anyway.
+         Valgrind knows where it is located and it is already registered as
+         VG_(clstk_id). */
+
+      new_size = VG_(clstk_max_size);
+      new_end = tst->client_stack_highest_byte;
+      new_start = new_end + 1 - new_size;
+   }
+
+   if (tst->os_state.stk_id == (UWord)-1) {
+      /* This thread doesn't have a stack set yet. */
+      VG_(debugLog)(2, "syswrap-solaris",
+                       "Stack set to %#lx-%#lx (new) for thread %d.\n",
+                       new_start, new_end, tid);
+      tst->os_state.stk_id = VG_(register_stack)(new_start, new_end);
+   }
+   else {
+      /* Change a thread stack. */
+      VG_(debugLog)(2, "syswrap-solaris",
+                       "Stack set to %#lx-%#lx (change) for thread %d.\n",
+                       new_start, new_end, tid);
+      VG_(change_stack)(tst->os_state.stk_id, new_start, new_end);
+   }
+   tst->client_stack_highest_byte = new_end;
+   tst->client_stack_szB = new_size;
+}
+
+/* ---------------------------------------------------------------------
+   Door tracking. Used mainly for server side where door_return()
+   parameters alone do not contain sufficient information.
+   Also used on client side when new door descriptors are passed via
+   door_call() in desc_ptr. Not used for tracking door descriptors
+   explicitly open()'ed [generic fd tracking is used in that case].
+   ------------------------------------------------------------------ */
+
+/* One of these is allocated for each created door. */
+typedef struct OpenDoor
+{
+   Bool server; /* TRUE = server door, FALSE = client door */
+   Int fd;      /* The file descriptor. */
+   union {
+      /* Server side. */
+      struct {
+         Addr server_procedure;  /* The door server procedure. */
+         HChar *pathname;        /* NULL if unknown. */
+      };
+      /* Client side. */
+      struct {
+         /* Hook called during PRE door_call()
+            to check contents of params->data_ptr. */
+         void (*pre_mem_hook)(ThreadId tid, Int fd,
+                              void *data_ptr, SizeT data_size);
+         /* Hook called during POST door_call()
+            to define contents of params->rbuf. */
+         void (*post_mem_hook)(ThreadId tid, Int fd,
+                               void *rbuf, SizeT rsize);
+      };
+   };
+   struct OpenDoor *next, *prev;
+} OpenDoor;
+
+/* List of allocated door fds. */
+static OpenDoor *doors_recorded = NULL;
+static UInt nr_doors_recorded = 0;
+
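+/* Allocates a new door record and links it at the head of the doors_recorded
+   list; the caller fills in the remaining fields. */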
+static OpenDoor *door_record_create(void)
+{
+   OpenDoor *d = VG_(malloc)("syswrap.door_record_create.1", sizeof(OpenDoor));
+   d->prev = NULL;
+   d->next = doors_recorded;
+   if (doors_recorded != NULL)
+      doors_recorded->prev = d;
+   doors_recorded = d;
+   nr_doors_recorded += 1;
+
+   return d;
+}
+
+/* Records a server door. */
+static void door_record_server(ThreadId tid, Addr server_procedure, Int fd)
+{
+   OpenDoor *d = doors_recorded;
+
+   while (d != NULL) {
+      if ((d->server == TRUE) && (d->server_procedure == server_procedure)) {
+         if (d->pathname) {
+            VG_(free)(d->pathname);
+         }
+         break;
+      }
+      d = d->next;
+   }
+
+   if (d == NULL)
+      d = door_record_create();
+   vg_assert(d != NULL);
+
+   d->server = TRUE;
+   d->fd = fd;
+   d->server_procedure = server_procedure;
+   d->pathname = NULL;
+}
+
+/* Records a client door. */
+static void door_record_client(ThreadId tid, Int fd,
+   void (*pre_mem_hook)(ThreadId tid, Int fd, void *data_ptr, SizeT data_size),
+   void (*post_mem_hook)(ThreadId tid, Int fd, void *rbuf, SizeT rsize))
+{
+   OpenDoor *d = doors_recorded;
+
+   while (d != NULL) {
+      if ((d->server == FALSE) && (d->fd == fd))
+         break;
+      d = d->next;
+   }
+
+   if (d == NULL)
+      d = door_record_create();
+   vg_assert(d != NULL);
+
+   d->server = FALSE;
+   d->fd = fd;
+   d->pre_mem_hook = pre_mem_hook;
+   d->post_mem_hook = post_mem_hook;
+}
+
+/* Revokes an open door, be it server side or client side. */
+static void door_revoke(ThreadId tid, Int fd)
+{
+   OpenDoor *d = doors_recorded;
+
+   while (d != NULL) {
+      if (d->fd == fd) {
+         if (d->prev != NULL)
+            d->prev->next = d->next;
+         else
+            doors_recorded = d->next;
+         if (d->next != NULL)
+            d->next->prev = d->prev;
+
+         if ((d->server == TRUE) && (d->pathname != NULL))
+            VG_(free)(d->pathname);
+         VG_(free)(d);
+         nr_doors_recorded -= 1;
+         return;
+      }
+      d = d->next;
+   }
+}
+
+/* Attaches a server door to a filename. */
+static void door_server_fattach(Int fd, HChar *pathname)
+{
+   OpenDoor *d = doors_recorded;
+
+   while (d != NULL) {
+      if (d->fd == fd) {
+         vg_assert(d->server == TRUE);
+
+         if (d->pathname != NULL)
+            VG_(free)(d->pathname);
+         d->pathname = VG_(strdup)("syswrap.door_server_fattach.1", pathname);
+         return;
+      }
+      d = d->next;
+   }
+}
+
+/* Finds a server door based on server procedure. */
+static const OpenDoor *door_find_by_proc(Addr server_procedure)
+{
+   OpenDoor *d = doors_recorded;
+
+   while (d != NULL) {
+      if ((d->server) && (d->server_procedure == server_procedure))
+         return d;
+      d = d->next;
+   }
+
+   return NULL;
+}
+
+/* Finds a client door based on fd. */
+static const OpenDoor *door_find_by_fd(Int fd)
+{
+   OpenDoor *d = doors_recorded;
+
+   while (d != NULL) {
+      if ((d->server == FALSE) && (d->fd == fd))
+         return d;
+      d = d->next;
+   }
+
+   return NULL;
+}
+
+/* ---------------------------------------------------------------------
+   PRE/POST wrappers for Solaris-specific syscalls
+   ------------------------------------------------------------------ */
+
+#define PRE(name)       DEFN_PRE_TEMPLATE(solaris, name)
+#define POST(name)      DEFN_POST_TEMPLATE(solaris, name)
+
+/* prototypes */
+DECL_TEMPLATE(solaris, sys_exit);
+#if defined(SOLARIS_SPAWN_SYSCALL)
+DECL_TEMPLATE(solaris, sys_spawn);
+#endif /* SOLARIS_SPAWN_SYSCALL */
+#if defined(SOLARIS_OLD_SYSCALLS)
+DECL_TEMPLATE(solaris, sys_open);
+#endif /* SOLARIS_OLD_SYSCALLS */
+DECL_TEMPLATE(solaris, sys_close);
+DECL_TEMPLATE(solaris, sys_linkat);
+DECL_TEMPLATE(solaris, sys_symlinkat);
+DECL_TEMPLATE(solaris, sys_time);
+DECL_TEMPLATE(solaris, sys_brk);
+DECL_TEMPLATE(solaris, sys_stat);
+DECL_TEMPLATE(solaris, sys_lseek);
+DECL_TEMPLATE(solaris, sys_mount);
+DECL_TEMPLATE(solaris, sys_readlinkat);
+DECL_TEMPLATE(solaris, sys_stime);
+DECL_TEMPLATE(solaris, sys_fstat);
+#if defined(SOLARIS_FREALPATHAT_SYSCALL)
+DECL_TEMPLATE(solaris, sys_frealpathat);
+#endif /* SOLARIS_FREALPATHAT_SYSCALL */
+DECL_TEMPLATE(solaris, sys_stty);
+DECL_TEMPLATE(solaris, sys_gtty);
+DECL_TEMPLATE(solaris, sys_pgrpsys);
+DECL_TEMPLATE(solaris, sys_pipe);
+DECL_TEMPLATE(solaris, sys_faccessat);
+DECL_TEMPLATE(solaris, sys_mknodat);
+DECL_TEMPLATE(solaris, sys_sysi86);
+DECL_TEMPLATE(solaris, sys_shmsys);
+DECL_TEMPLATE(solaris, sys_semsys);
+DECL_TEMPLATE(solaris, sys_ioctl);
+DECL_TEMPLATE(solaris, sys_fchownat);
+DECL_TEMPLATE(solaris, sys_fdsync);
+DECL_TEMPLATE(solaris, sys_execve);
+DECL_TEMPLATE(solaris, sys_fcntl);
+DECL_TEMPLATE(solaris, sys_renameat);
+DECL_TEMPLATE(solaris, sys_unlinkat);
+DECL_TEMPLATE(solaris, sys_fstatat);
+DECL_TEMPLATE(solaris, sys_openat);
+DECL_TEMPLATE(solaris, sys_tasksys);
+DECL_TEMPLATE(solaris, sys_getpagesizes);
+DECL_TEMPLATE(solaris, sys_lwp_park);
+DECL_TEMPLATE(solaris, sys_sendfilev);
+#if defined(SOLARIS_LWP_NAME_SYSCALL)
+DECL_TEMPLATE(solaris, sys_lwp_name);
+#endif /* SOLARIS_LWP_NAME_SYSCALL */
+DECL_TEMPLATE(solaris, sys_privsys);
+DECL_TEMPLATE(solaris, sys_ucredsys);
+DECL_TEMPLATE(solaris, sys_getmsg);
+DECL_TEMPLATE(solaris, sys_putmsg);
+DECL_TEMPLATE(solaris, sys_lstat);
+DECL_TEMPLATE(solaris, sys_sigprocmask);
+DECL_TEMPLATE(solaris, sys_sigaction);
+DECL_TEMPLATE(solaris, sys_sigpending);
+DECL_TEMPLATE(solaris, sys_getsetcontext);
+DECL_TEMPLATE(solaris, sys_fchmodat);
+DECL_TEMPLATE(solaris, sys_mkdirat);
+DECL_TEMPLATE(solaris, sys_statvfs);
+DECL_TEMPLATE(solaris, sys_fstatvfs);
+DECL_TEMPLATE(solaris, sys_nfssys);
+DECL_TEMPLATE(solaris, sys_waitid);
+#if defined(SOLARIS_UTIMESYS_SYSCALL)
+DECL_TEMPLATE(solaris, sys_utimesys);
+#endif /* SOLARIS_UTIMESYS_SYSCALL */
+#if defined(SOLARIS_UTIMENSAT_SYSCALL)
+DECL_TEMPLATE(solaris, sys_utimensat);
+#endif /* SOLARIS_UTIMENSAT_SYSCALL */
+DECL_TEMPLATE(solaris, sys_sigresend);
+DECL_TEMPLATE(solaris, sys_priocntlsys);
+DECL_TEMPLATE(solaris, sys_pathconf);
+DECL_TEMPLATE(solaris, sys_mmap);
+#if defined(SOLARIS_UUIDSYS_SYSCALL)
+DECL_TEMPLATE(solaris, sys_uuidsys);
+#endif /* SOLARIS_UUIDSYS_SYSCALL */
+DECL_TEMPLATE(solaris, sys_mmapobj);
+DECL_TEMPLATE(solaris, sys_memcntl);
+DECL_TEMPLATE(solaris, sys_getpmsg);
+DECL_TEMPLATE(solaris, sys_putpmsg);
+#if defined(SOLARIS_OLD_SYSCALLS)
+DECL_TEMPLATE(solaris, sys_rename);
+#endif /* SOLARIS_OLD_SYSCALLS */
+DECL_TEMPLATE(solaris, sys_uname);
+DECL_TEMPLATE(solaris, sys_setegid);
+DECL_TEMPLATE(solaris, sys_sysconfig);
+DECL_TEMPLATE(solaris, sys_systeminfo);
+DECL_TEMPLATE(solaris, sys_seteuid);
+DECL_TEMPLATE(solaris, sys_forksys);
+DECL_TEMPLATE(solaris, sys_sigtimedwait);
+DECL_TEMPLATE(solaris, sys_yield);
+DECL_TEMPLATE(solaris, sys_lwp_sema_post);
+DECL_TEMPLATE(solaris, sys_lwp_sema_trywait);
+DECL_TEMPLATE(solaris, sys_lwp_detach);
+DECL_TEMPLATE(solaris, sys_fchroot);
+DECL_TEMPLATE(solaris, sys_gettimeofday);
+DECL_TEMPLATE(solaris, sys_lwp_create);
+DECL_TEMPLATE(solaris, sys_lwp_exit);
+DECL_TEMPLATE(solaris, sys_lwp_suspend);
+DECL_TEMPLATE(solaris, sys_lwp_continue);
+#if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
+DECL_TEMPLATE(solaris, sys_lwp_sigqueue);
+#else
+DECL_TEMPLATE(solaris, sys_lwp_kill);
+#endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
+DECL_TEMPLATE(solaris, sys_lwp_self);
+DECL_TEMPLATE(solaris, sys_lwp_sigmask);
+DECL_TEMPLATE(solaris, sys_lwp_private);
+DECL_TEMPLATE(solaris, sys_lwp_wait);
+DECL_TEMPLATE(solaris, sys_lwp_mutex_wakeup);
+DECL_TEMPLATE(solaris, sys_lwp_cond_broadcast);
+DECL_TEMPLATE(solaris, sys_pread);
+DECL_TEMPLATE(solaris, sys_pwrite);
+DECL_TEMPLATE(solaris, sys_rusagesys);
+DECL_TEMPLATE(solaris, sys_port);
+DECL_TEMPLATE(solaris, sys_pollsys);
+DECL_TEMPLATE(solaris, sys_labelsys);
+DECL_TEMPLATE(solaris, sys_acl);
+DECL_TEMPLATE(solaris, sys_auditsys);
+DECL_TEMPLATE(solaris, sys_p_online);
+DECL_TEMPLATE(solaris, sys_sigqueue);
+DECL_TEMPLATE(solaris, sys_clock_gettime);
+DECL_TEMPLATE(solaris, sys_clock_settime);
+DECL_TEMPLATE(solaris, sys_clock_getres);
+DECL_TEMPLATE(solaris, sys_timer_create);
+DECL_TEMPLATE(solaris, sys_timer_delete);
+DECL_TEMPLATE(solaris, sys_timer_settime);
+DECL_TEMPLATE(solaris, sys_timer_gettime);
+DECL_TEMPLATE(solaris, sys_timer_getoverrun);
+DECL_TEMPLATE(solaris, sys_facl);
+DECL_TEMPLATE(solaris, sys_door);
+DECL_TEMPLATE(solaris, sys_schedctl);
+DECL_TEMPLATE(solaris, sys_resolvepath);
+DECL_TEMPLATE(solaris, sys_lwp_mutex_timedlock);
+DECL_TEMPLATE(solaris, sys_lwp_rwlock_sys);
+DECL_TEMPLATE(solaris, sys_lwp_sema_timedwait);
+DECL_TEMPLATE(solaris, sys_zone);
+DECL_TEMPLATE(solaris, sys_getcwd);
+DECL_TEMPLATE(solaris, sys_so_socket);
+DECL_TEMPLATE(solaris, sys_so_socketpair);
+DECL_TEMPLATE(solaris, sys_bind);
+DECL_TEMPLATE(solaris, sys_listen);
+DECL_TEMPLATE(solaris, sys_accept);
+DECL_TEMPLATE(solaris, sys_connect);
+DECL_TEMPLATE(solaris, sys_shutdown);
+DECL_TEMPLATE(solaris, sys_recv);
+DECL_TEMPLATE(solaris, sys_recvfrom);
+DECL_TEMPLATE(solaris, sys_recvmsg);
+DECL_TEMPLATE(solaris, sys_send);
+DECL_TEMPLATE(solaris, sys_sendmsg);
+DECL_TEMPLATE(solaris, sys_sendto);
+DECL_TEMPLATE(solaris, sys_getpeername);
+DECL_TEMPLATE(solaris, sys_getsockname);
+DECL_TEMPLATE(solaris, sys_getsockopt);
+DECL_TEMPLATE(solaris, sys_setsockopt);
+DECL_TEMPLATE(solaris, sys_lwp_mutex_register);
+DECL_TEMPLATE(solaris, sys_uucopy);
+DECL_TEMPLATE(solaris, sys_umount2);
+
+DECL_TEMPLATE(solaris, fast_gethrtime);
+DECL_TEMPLATE(solaris, fast_gethrvtime);
+DECL_TEMPLATE(solaris, fast_gethrestime);
+#if defined(SOLARIS_GETHRT_FASTTRAP)
+DECL_TEMPLATE(solaris, fast_gethrt);
+#endif /* SOLARIS_GETHRT_FASTTRAP */
+#if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
+DECL_TEMPLATE(solaris, fast_getzoneoffset);
+#endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
+
+/* implementation */
+PRE(sys_exit)
+{
+   /* void exit(int status); */
+   ThreadId t;
+
+   PRINT("sys_exit( %ld )", ARG1);
+   PRE_REG_READ1(void, "exit", int, status);
+
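+   /* Mark every live thread as exiting with this status and kick the blocked
+      ones out of their syscalls, so that the whole process shuts down. */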
+   for (t = 1; t < VG_N_THREADS; t++) {
+      if (VG_(threads)[t].status == VgTs_Empty)
+         continue;
+
+      VG_(threads)[t].exitreason = VgSrc_ExitProcess;
+      VG_(threads)[t].os_state.exitcode = ARG1;
+
+      /* Unblock it, if blocked. */
+      if (t != tid)
+         VG_(get_thread_out_of_syscall)(t);
+   }
+
+   /* We have to claim the syscall already succeeded. */
+   SET_STATUS_Success(0);
+}
+
+#if defined(SOLARIS_SPAWN_SYSCALL)
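+/* Checks a single variable-length file-action record (dup2, close, closefrom,
+   open or chdir) attached to a spawn() call.  Returns False and sets a
+   failure status if a file descriptor check does not pass. */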
+static Bool spawn_pre_check_kfa(ThreadId tid, SyscallStatus *status,
+                                vki_kfile_attr_t *kfa)
+{
+   PRE_FIELD_READ("spawn(attrs->kfa_size)", kfa->kfa_size);
+   PRE_FIELD_READ("spawn(attrs->kfa_type)", kfa->kfa_type);
+
+   if (ML_(safe_to_deref)(kfa, kfa->kfa_size)) {
+      switch (kfa->kfa_type) {
+      case VKI_FA_DUP2:
+         PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
+         PRE_FIELD_READ("spawn(attrs->kfa_newfiledes)", kfa->kfa_newfiledes);
+         if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(dup2)", tid, False) ||
+             !ML_(fd_allowed)(kfa->kfa_newfiledes, "spawn(dup2)", tid, False)) {
+            SET_STATUS_Failure(VKI_EBADF);
+            return False;
+         }
+         break;
+      case VKI_FA_CLOSE:
+         PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
+         /* If doing -d style logging (which is to fd = 2 = stderr),
+            don't allow that filedes to be closed. See ML_(fd_allowed)(). */
+         if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(close)", tid, False) ||
+             (kfa->kfa_filedes == 2 && VG_(debugLog_getLevel)() > 0)) {
+            SET_STATUS_Failure(VKI_EBADF);
+            return False;
+         }
+         break;
+      case VKI_FA_CLOSEFROM:
+         /* :TODO: All file descriptors greater than or equal to
+            kfa->kfa_filedes would have to be checked. */
+         VG_(unimplemented)("Support for spawn() with file attribute type "
+                            "FA_CLOSEFROM.");
+         break;
+      case VKI_FA_OPEN:
+         PRE_FIELD_READ("spawn(attrs->kfa_filedes)", kfa->kfa_filedes);
+         PRE_FIELD_READ("spawn(attrs->kfa_oflag)", kfa->kfa_oflag);
+         PRE_FIELD_READ("spawn(attrs->kfa_mode)", kfa->kfa_mode);
+         if (!ML_(fd_allowed)(kfa->kfa_filedes, "spawn(open)", tid, False)) {
+            SET_STATUS_Failure(VKI_EBADF);
+            return False;
+         }
+         /* fallthrough */
+      case VKI_FA_CHDIR:
+         PRE_FIELD_READ("spawn(attrs->kfa_pathsize)", kfa->kfa_pathsize);
+         if (kfa->kfa_pathsize != 0) {
+            PRE_MEM_RASCIIZ("spawn(attrs->kfa_data)", (Addr) kfa->kfa_data);
+         }
+         break;
+      default:
+         VG_(unimplemented)("Support for spawn() with file attribute type %u.",
+                            kfa->kfa_type);
+      }
+   }
+
+   return True;
+}
+
+PRE(sys_spawn)
+{
+   /* int spawn(char *path, void *attrs, size_t attrsize,
+                char *argenv, size_t aesize); */
+   PRINT("sys_spawn ( %#lx(%s), %#lx, %ld, %#lx, %ld )",
+         ARG1, (HChar *) ARG1, ARG2, ARG3, ARG4, ARG5);
+   PRE_REG_READ5(long, "spawn", const char *, path, void *, attrs,
+                 size_t, attrsize, char *, argenv, size_t, aesize);
+
+   /* First check input arguments. */
+   PRE_MEM_RASCIIZ("spawn(path)", ARG1);
+   if (ARG3 > 0) {
+      /*  --- vki_kspawn_attr_t --
+          | ksa_version          |
+          | ksa_size             |
+          | ksa_attr_off         |  -----| (only if != 0)
+          | ksa_attr_size        |       |
+          | ksa_path_off         |  =====|====| (only if != 0)
+          | ksa_path_size        |       |    |
+          | ksa_shell_off        |  -----|----|----| (only if != 0)
+          | ksa_shell_size       |       |    |    |
+          | ksa_data[0]          |       |    |    |
+          ------------------------       |    |    |
+          | vki_spawn_attr_t     |  <----|    |    |
+          ------------------------            |    |
+          | path                 |  <---------|    |
+          ------------------------                 |
+          | shell                |  <---------------
+          ------------------------
+          | file actions         |  (not included in ksa_size, only in ARG3)
+          ------------------------
+
+          ksa_size = sizeof(vki_kspawn_attr_t) + ksa_attr_size + ksa_path_size +
+                     ksa_shell_size
+          attrs_size (ARG3) = ksa_size + file actions size */
+
+      vki_kspawn_attr_t *attrs = (vki_kspawn_attr_t *) ARG2;
+      PRE_FIELD_READ("spawn(attrs->ksa_version)", attrs->ksa_version);
+      PRE_FIELD_READ("spawn(attrs->ksa_size)", attrs->ksa_size);
+      PRE_FIELD_READ("spawn(attrs->ksa_attr_off)", attrs->ksa_attr_off);
+      PRE_FIELD_READ("spawn(attrs->ksa_path_off)", attrs->ksa_path_off);
+      PRE_FIELD_READ("spawn(attrs->ksa_shell_off)", attrs->ksa_shell_off);
+
+      if (ML_(safe_to_deref)(attrs, sizeof(vki_kspawn_attr_t))) {
+         if (attrs->ksa_version != VKI_SPAWN_VERSION) {
+            VG_(unimplemented)("Support for spawn() with attributes "
+                               "version %u.", attrs->ksa_version);
+         }
+
+         if (attrs->ksa_attr_off != 0) {
+            PRE_FIELD_READ("spawn(attrs->ksa_attr_size)", attrs->ksa_attr_size);
+            vki_spawn_attr_t *sap =
+                (vki_spawn_attr_t *) ((Addr) attrs + attrs->ksa_attr_off);
+            PRE_MEM_READ("spawn(attrs->ksa_attr)",
+                         (Addr) sap, attrs->ksa_attr_size);
+            if (ML_(safe_to_deref)(sap, sizeof(vki_spawn_attr_t))) {
+               if (sap->sa_psflags & VKI_POSIX_SPAWN_SETVAMASK_NP) {
+                  VG_(unimplemented)("Support for spawn() with attributes flag "
+                                     "including POSIX_SPAWN_SETVAMASK_NP.");
+               }
+               /* paranoia */
+               Int rem = sap->sa_psflags & ~(
+                  VKI_POSIX_SPAWN_RESETIDS      | VKI_POSIX_SPAWN_SETPGROUP |
+                  VKI_POSIX_SPAWN_SETSIGDEF     | VKI_POSIX_SPAWN_SETSIGMASK |
+                  VKI_POSIX_SPAWN_SETSCHEDPARAM | VKI_POSIX_SPAWN_SETSCHEDULER |
+                  VKI_POSIX_SPAWN_SETSID_NP     | VKI_POSIX_SPAWN_SETVAMASK_NP |
+                  VKI_POSIX_SPAWN_SETSIGIGN_NP  | VKI_POSIX_SPAWN_NOSIGCHLD_NP |
+                  VKI_POSIX_SPAWN_WAITPID_NP    | VKI_POSIX_SPAWN_NOEXECERR_NP);
+               if (rem != 0) {
+                  VG_(unimplemented)("Support for spawn() with attributes flag "
+                                     "%#x.", sap->sa_psflags);
+               }
+            }
+         }
+
+         if (attrs->ksa_path_off != 0) {
+            PRE_FIELD_READ("spawn(attrs->ksa_path_size)", attrs->ksa_path_size);
+            PRE_MEM_RASCIIZ("spawn(attrs->ksa_path)",
+                            (Addr) attrs + attrs->ksa_path_off);
+         }
+
+         if (attrs->ksa_shell_off != 0) {
+            PRE_FIELD_READ("spawn(attrs->ksa_shell_size)",
+                           attrs->ksa_shell_size);
+            PRE_MEM_RASCIIZ("spawn(attrs->ksa_shell)",
+                            (Addr) attrs + attrs->ksa_shell_off);
+         }
+
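+         /* File actions follow the kspawn_attr_t block: a packed sequence of
+            variable-sized vki_kfile_attr_t records, each advanced by its own
+            kfa_size field, until ARG2 + ARG3 is reached. */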
+         vki_kfile_attr_t *kfa = (vki_kfile_attr_t *) (ARG2 + attrs->ksa_size);
+         while ((Addr) kfa < ARG2 + ARG3) {
+            if (spawn_pre_check_kfa(tid, status, kfa) == False) {
+               return;
+            }
+            kfa = (vki_kfile_attr_t *) ((Addr) kfa + kfa->kfa_size);
+         }
+      }
+   }
+   PRE_MEM_READ("spawn(argenv)", ARG4, ARG5);
+
+   /* Check that the name at least begins in client-accessible storage. */
+   if ((ARG1 == 0) || !ML_(safe_to_deref)((HChar *) ARG1, 1)) {
+      SET_STATUS_Failure(VKI_EFAULT);
+      return;
+   }
+
+   /* Check that attrs reside in client-accessible storage. */
+   if (ARG2 != 0) {
+      if (!VG_(am_is_valid_for_client)(ARG2, ARG3, VKI_PROT_READ)) {
+         SET_STATUS_Failure(VKI_EFAULT);
+         return;
+      }
+   }
+
+   /* Check that argenv resides in client-accessible storage.
+      Solaris does not allow spawn() to be performed without any arguments
+      and environment variables specified. */
+   if ((ARG4 == 0) /* obviously bogus */ ||
+       !VG_(am_is_valid_for_client)(ARG4, ARG5, VKI_PROT_READ)) {
+      SET_STATUS_Failure(VKI_EFAULT);
+      return;
+   }
+
+   /* Copy existing attrs or create empty minimal ones. */
+   vki_kspawn_attr_t *attrs;
+   SizeT attrs_size;
+   if (ARG2 == 0) {
+      /* minimalistic kspawn_attr_t + spawn_attr_t */
+      attrs_size = sizeof(vki_kspawn_attr_t) + sizeof(vki_spawn_attr_t);
+      attrs = VG_(calloc)("syswrap.spawn.1", 1, attrs_size);
+      attrs->ksa_version = VKI_SPAWN_VERSION;
+      attrs->ksa_size = attrs_size;
+      attrs->ksa_attr_off = sizeof(vki_kspawn_attr_t);
+      attrs->ksa_attr_size = sizeof(vki_spawn_attr_t);
+   } else if (((vki_kspawn_attr_t *) ARG2)->ksa_attr_off == 0) {
+      /* existing kspawn_attr_t but missing spawn_attr_t */
+      attrs_size = ARG3 + sizeof(vki_spawn_attr_t);
+      attrs = VG_(calloc)("syswrap.spawn.2", 1, attrs_size);
+      VG_(memcpy)(attrs, (void *) ARG2, sizeof(vki_kspawn_attr_t));
+      SizeT file_actions_size = ARG3 - attrs->ksa_size;
+      attrs->ksa_size += sizeof(vki_spawn_attr_t);
+      attrs->ksa_attr_off = sizeof(vki_kspawn_attr_t);
+      attrs->ksa_attr_size = sizeof(vki_spawn_attr_t);
+      if (attrs->ksa_path_off != 0) {
+         VG_(memcpy)((HChar *) attrs + attrs->ksa_path_off +
+                     sizeof(vki_spawn_attr_t), (HChar *) ARG2 +
+                     attrs->ksa_path_off, attrs->ksa_path_size);
+         attrs->ksa_path_off += sizeof(vki_spawn_attr_t);
+      }
+      if (attrs->ksa_shell_off != 0) {
+         VG_(memcpy)((HChar *) attrs + attrs->ksa_shell_off +
+                     sizeof(vki_spawn_attr_t), (HChar *) ARG2 +
+                     attrs->ksa_shell_off, attrs->ksa_shell_size);
+         attrs->ksa_shell_off += sizeof(vki_spawn_attr_t);
+      }
+      if (file_actions_size > 0) {
+         VG_(memcpy)((HChar *) attrs + attrs_size - file_actions_size,
+                     (HChar *) ARG2 + ARG3 - file_actions_size,
+                     file_actions_size);
+      }
+   } else {
+      /* existing kspawn_attr_t + spawn_attr_t */
+      attrs_size = ARG3;
+      attrs = VG_(malloc)("syswrap.spawn.3", attrs_size);
+      VG_(memcpy)(attrs, (void *) ARG2, attrs_size);
+   }
+   vki_spawn_attr_t *spa = (vki_spawn_attr_t *) ((HChar *) attrs +
+                                                 attrs->ksa_attr_off);
+
+   /* Convert the argv and envp parts of argenv into separate XArrays.
+      Duplicate the strings because argv and envp will then be modified. */
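+   /* The argenv blob encodes both vectors: every string is prefixed by a
+      '\1' byte and NUL-terminated; a lone '\0' closes the argv part and
+      another lone '\0' closes the envp part.  For example (illustrative
+      values only): \1"ls"\0 \1"-l"\0 \0 \1"PATH=/bin"\0 \0. */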
+   XArray *argv = VG_(newXA)(VG_(malloc), "syswrap.spawn.4",
+                             VG_(free), sizeof(HChar *));
+   XArray *envp = VG_(newXA)(VG_(malloc), "syswrap.spawn.5",
+                             VG_(free), sizeof(HChar *));
+
+   HChar *argenv = (HChar *) ARG4;
+   XArray *current_xa = argv;
+   while ((Addr) argenv < ARG4 + ARG5) {
+      if (*argenv == '\0') {
+         argenv += 1;
+         if (current_xa == argv) {
+            current_xa = envp;
+            if ((*argenv == '\0') && ((Addr) argenv == ARG4 + ARG5 - 1)) {
+               /* envp part is empty, it contained only {NULL}. */
+               break;
+            }
+         } else {
+            if ((Addr) argenv != ARG4 + ARG5) {
+               if (VG_(clo_trace_syscalls))
+                  VG_(debugLog)(3, "syswrap-solaris", "spawn: bogus argenv\n");
+               SET_STATUS_Failure(VKI_EINVAL);
+               goto exit;
+            }
+            break;
+         }
+      }
+
+      if (*argenv != '\1') {
+         if (VG_(clo_trace_syscalls))
+            VG_(debugLog)(3, "syswrap-solaris", "spawn: bogus argenv\n");
+         SET_STATUS_Failure(VKI_EINVAL);
+         goto exit;
+      }
+      argenv += 1;
+
+      HChar *duplicate = VG_(strdup)("syswrap.spawn.6", argenv);
+      VG_(addToXA)(current_xa, &duplicate);
+      argenv += VG_(strlen)(argenv) + 1;
+   }
+
+   /* Debug-only printing. */
+   if (0) {
+      VG_(printf)("\nARG1 = %#lx(%s)\n", ARG1, (HChar *) ARG1);
+      VG_(printf)("ARG4 (argv) = ");
+      for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
+         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(argv, i));
+      }
+
+      VG_(printf)("\nARG4 (envp) = ");
+      for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
+         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(envp, i));
+      }
+      VG_(printf)("\n");
+   }
+
+   /* Decide whether or not we want to trace the spawned child.
+      Omit the executable name itself from child_argv and terminate the
+      vector with NULL, since VG_(should_we_trace_this_child) may walk it. */
+   const HChar **child_argv = VG_(malloc)("syswrap.spawn.7",
+                                      VG_(sizeXA)(argv) * sizeof(HChar *));
+   for (Word i = 1; i < VG_(sizeXA)(argv); i++) {
+      child_argv[i - 1] = *(HChar **) VG_(indexXA)(argv, i);
+   }
+   child_argv[VG_(sizeXA)(argv) - 1] = NULL;
+   Bool trace_this_child = VG_(should_we_trace_this_child)((HChar *) ARG1,
+                                                           child_argv);
+   VG_(free)(child_argv);
+
+   /* If we're tracing the child, and the launcher name looks bogus (possibly
+      because launcher.c couldn't figure it out, see comments therein) then we
+      have no option but to fail. */
+   if (trace_this_child &&
+       (!VG_(name_of_launcher) || VG_(name_of_launcher)[0] != '/')) {
+      SET_STATUS_Failure(VKI_ECHILD); /* "No child processes." */
+      goto exit;
+   }
+
+   /* Set up the child's exe path. */
+   const HChar *path = (const HChar *) ARG1;
+   const HChar *launcher_basename = NULL;
+   if (trace_this_child) {
+      /* We want to exec the launcher. */
+      path = VG_(name_of_launcher);
+      vg_assert(path != NULL);
+
+      launcher_basename = VG_(strrchr)(path, '/');
+      if ((launcher_basename == NULL) || (launcher_basename[1] == '\0')) {
+         launcher_basename = path;  /* hmm, tres dubious */
+      } else {
+         launcher_basename++;
+      }
+   }
+
+   /* Set up the child's environment.
+
+      Remove the valgrind-specific stuff from the environment so the child
+      doesn't get vgpreload_core.so, vgpreload_<tool>.so, etc. This is done
+      unconditionally, since if we are tracing the child, the child valgrind
+      will set up the appropriate client environment.
+
+      Then, if tracing the child, set VALGRIND_LIB for it. */
+   HChar **child_envp = VG_(calloc)("syswrap.spawn.8",
+                                    VG_(sizeXA)(envp) + 1, sizeof(HChar *));
+   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
+      child_envp[i] = *(HChar **) VG_(indexXA)(envp, i);
+   }
+   VG_(env_remove_valgrind_env_stuff)(child_envp, /* ro_strings */ False,
+                                      VG_(free));
+
+   /* Stuff was removed from child_envp, reflect that in envp XArray. */
+   VG_(dropTailXA)(envp, VG_(sizeXA)(envp));
+   for (UInt i = 0; child_envp[i] != NULL; i++) {
+      VG_(addToXA)(envp, &child_envp[i]);
+   }
+   VG_(free)(child_envp);
+
+   if (trace_this_child) {
+      /* Set VALGRIND_LIB in envp. */
+      SizeT len = VG_(strlen)(VALGRIND_LIB) + VG_(strlen)(VG_(libdir)) + 2;
+      HChar *valstr = VG_(malloc)("syswrap.spawn.9", len);
+      VG_(sprintf)(valstr, "%s=%s", VALGRIND_LIB, VG_(libdir));
+      VG_(addToXA)(envp, &valstr);
+   }
+
+   /* Set up the child's args. If not tracing it, they are left untouched.
+      Otherwise, they are:
+
+      [launcher_basename] ++ VG_(args_for_valgrind) ++ [ARG1] ++ ARG4[1..],
+
+      except that the first VG_(args_for_valgrind_noexecpass) args are
+      omitted. */
+   if (trace_this_child) {
+      vg_assert(VG_(args_for_valgrind) != NULL);
+      vg_assert(VG_(args_for_valgrind_noexecpass) >= 0);
+      vg_assert(VG_(args_for_valgrind_noexecpass)
+                   <= VG_(sizeXA)(VG_(args_for_valgrind)));
+
+      /* So what args will there be? Bear with me... */
+      /* ... launcher basename, ... */
+      HChar *duplicate = VG_(strdup)("syswrap.spawn.10", launcher_basename);
+      VG_(insertIndexXA)(argv, 0, &duplicate);
+
+      /* ... Valgrind's args, ... */
+      UInt v_args = VG_(sizeXA)(VG_(args_for_valgrind));
+      v_args -= VG_(args_for_valgrind_noexecpass);
+      for (Word i = VG_(args_for_valgrind_noexecpass);
+           i < VG_(sizeXA)(VG_(args_for_valgrind)); i++) {
+         duplicate = VG_(strdup)("syswrap.spawn.11",
+                           *(HChar **) VG_(indexXA)(VG_(args_for_valgrind), i));
+         VG_(insertIndexXA)(argv, 1 + i, &duplicate);
+      }
+
+      /* ... name of client executable, ... */
+      duplicate = VG_(strdup)("syswrap.spawn.12", (HChar *) ARG1);
+      VG_(insertIndexXA)(argv, 1 + v_args, &duplicate);
+
+      /* ... and args for client executable (without [0]). */
+      duplicate = *(HChar **) VG_(indexXA)(argv, 1 + v_args + 1);
+      VG_(free)(duplicate);
+      VG_(removeIndexXA)(argv, 1 + v_args + 1);
+   }
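+   /* Illustration (hypothetical values): tracing "/bin/ls -l" with
+      --tool=memcheck typically ends up with an argv along the lines of
+      { "valgrind", "--tool=memcheck", "/bin/ls", "-l" }. */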
+
+   /* Debug-only printing. */
+   if (0) {
+      VG_(printf)("\npath = %s\n", path);
+      VG_(printf)("argv = ");
+      for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
+         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(argv, i));
+      }
+
+      VG_(printf)("\nenvp = ");
+      for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
+         VG_(printf)("%s ", *(HChar **) VG_(indexXA)(envp, i));
+      }
+      VG_(printf)("\n");
+   }
+
+   /* Set the signal state up for spawned child.
+
+      Signals set to be caught are equivalent to signals set to the default
+      action, from the child's perspective.
+
+      Therefore query SCSS and prepare default (DFL) and ignore (IGN) signal
+      sets. Then combine these sets with those passed from the client, if the
+      POSIX_SPAWN_SETSIGDEF or POSIX_SPAWN_SETSIGIGN_NP flags have been
+      specified. */
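+   /* For example, if the client has installed a handler for SIGINT, that
+      handler cannot survive into the spawned child, so SIGINT ends up in the
+      sa_sigdefault set built below. */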
+   vki_sigset_t sig_default;
+   vki_sigset_t sig_ignore;
+   VG_(sigemptyset)(&sig_default);
+   VG_(sigemptyset)(&sig_ignore);
+   for (Int i = 1; i < VG_(max_signal); i++) {
+      vki_sigaction_fromK_t sa;
+      VG_(do_sys_sigaction)(i, NULL, &sa); /* query SCSS */
+      if (sa.sa_handler == VKI_SIG_IGN) {
+         VG_(sigaddset)(&sig_ignore, i);
+      } else {
+         VG_(sigaddset)(&sig_default, i);
+      }
+   }
+
+   if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGDEF) {
+      VG_(sigaddset_from_set)(&spa->sa_sigdefault, &sig_default);
+   } else {
+      spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGDEF;
+      spa->sa_sigdefault = sig_default;
+   }
+
+   if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGIGN_NP) {
+      VG_(sigaddset_from_set)(&spa->sa_sigignore, &sig_ignore);
+   } else {
+      spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGIGN_NP;
+      spa->sa_sigignore = sig_ignore;
+   }
+
+   /* Set the signal mask for the spawned child.
+
+      Analogous to the signal handlers above: query SCSS for the blocked
+      signals mask and combine it with the one passed from the client, if the
+      POSIX_SPAWN_SETSIGMASK flag has been specified. */
+   vki_sigset_t *sigmask = &VG_(get_ThreadState)(tid)->sig_mask;
+   if (spa->sa_psflags & VKI_POSIX_SPAWN_SETSIGMASK) {
+      VG_(sigaddset_from_set)(&spa->sa_sigmask, sigmask);
+   } else {
+      spa->sa_psflags |= VKI_POSIX_SPAWN_SETSIGMASK;
+      spa->sa_sigmask = *sigmask;
+   }
+
+   /* Lastly, reconstruct argenv from argv + envp. */
+   SizeT argenv_size = 1 + 1;
+   for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
+      argenv_size += VG_(strlen)(*(HChar **) VG_(indexXA)(argv, i)) + 2;
+   }
+   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
+      argenv_size += VG_(strlen)(*(HChar **) VG_(indexXA)(envp, i)) + 2;
+   }
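+   /* Per string: "+ 2" accounts for the leading '\1' byte and the trailing
+      NUL; the initial "1 + 1" covers the two block-terminating '\0's. */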
+
+   argenv = VG_(malloc)("syswrap.spawn.13", argenv_size);
+   HChar *current = argenv;
+#define COPY_CHAR_TO_ARGENV(dst, character) \
+   do {                                     \
+      *(dst) = character;                   \
+      (dst) += 1;                           \
+   } while (0)
+#define COPY_STRING_TO_ARGENV(dst, src)       \
+   do {                                       \
+      COPY_CHAR_TO_ARGENV(dst, '\1');         \
+      SizeT src_len = VG_(strlen)((src)) + 1; \
+      VG_(memcpy)((dst), (src), src_len);     \
+      (dst) += src_len;                       \
+   } while (0)
+
+   for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
+      COPY_STRING_TO_ARGENV(current, *(HChar **) VG_(indexXA)(argv, i));
+   }
+   COPY_CHAR_TO_ARGENV(current, '\0');
+   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
+      COPY_STRING_TO_ARGENV(current, *(HChar **) VG_(indexXA)(envp, i));
+   }
+   COPY_CHAR_TO_ARGENV(current, '\0');
+   vg_assert(current == argenv + argenv_size);
+#undef COPY_CHAR_TO_ARGENV
+#undef COPY_STRING_TO_ARGENV
+
+   /* HACK: Temporarily restore the DATA rlimit for the spawned child.
+      This is a terrible hack to provide a sensible brk limit for the
+      child. */
+   VG_(setrlimit)(VKI_RLIMIT_DATA, &VG_(client_rlimit_data));
+
+   /* Actual spawn() syscall. */
+   SysRes res = VG_(do_syscall5)(__NR_spawn, (UWord) path, (UWord) attrs,
+                                 attrs_size, (UWord) argenv, argenv_size);
+   SET_STATUS_from_SysRes(res);
+   VG_(free)(argenv);
+
+   /* Restore DATA rlimit back to its previous value set in m_main.c. */
+   struct vki_rlimit zero = { 0, 0 };
+   zero.rlim_max = VG_(client_rlimit_data).rlim_max;
+   VG_(setrlimit)(VKI_RLIMIT_DATA, &zero);
+
+   if (SUCCESS) {
+      PRINT("   spawn: process %d spawned child %ld\n", VG_(getpid)(), RES);
+   }
+
+exit:
+   VG_(free)(attrs);
+   for (Word i = 0; i < VG_(sizeXA)(argv); i++) {
+      VG_(free)(*(HChar **) VG_(indexXA)(argv, i));
+   }
+   for (Word i = 0; i < VG_(sizeXA)(envp); i++) {
+      VG_(free)(*(HChar **) VG_(indexXA)(envp, i));
+   }
+   VG_(deleteXA)(argv);
+   VG_(deleteXA)(envp);
+}
+#endif /* SOLARIS_SPAWN_SYSCALL */
+
+/* Handles the case where the open is of /proc/self/psinfo or
+   /proc/<pid>/psinfo. Fetch fresh contents into psinfo_t,
+   fake fname, psargs, argc and argv. Write the structure to the fake
+   file we cooked up at startup (in m_main) and give out a copy of this
+   fd. Also seek the cloned fd back to the start. */
+static Bool handle_psinfo_open(SyscallStatus *status,
+                               Bool use_openat,
+                               const HChar *filename,
+                               Int arg1, UWord arg3, UWord arg4)
+{
+   if (!ML_(safe_to_deref)((const void *) filename, 1))
+      return False;
+
+   HChar name[VKI_PATH_MAX];    // large enough
+   VG_(sprintf)(name, "/proc/%d/psinfo", VG_(getpid)());
+
+   if (!VG_STREQ(filename, name) && !VG_STREQ(filename, "/proc/self/psinfo"))
+      return False;
+
+   /* use original arguments to open or openat */
+   SysRes sres;
+#if defined(SOLARIS_OLD_SYSCALLS)
+   if (use_openat)
+      sres = VG_(do_syscall4)(SYS_openat, arg1, (UWord) filename,
+                              arg3, arg4);
+   else
+      sres = VG_(do_syscall3)(SYS_open, (UWord) filename, arg3, arg4);
+#else
+   vg_assert(use_openat == True);
+   sres = VG_(do_syscall4)(SYS_openat, arg1, (UWord) filename,
+                           arg3, arg4);
+#endif /* SOLARIS_OLD_SYSCALLS */
+
+   if (sr_isError(sres)) {
+      SET_STATUS_from_SysRes(sres);
+      return True;
+   }
+   Int fd = sr_Res(sres);
+
+   vki_psinfo_t psinfo;
+   sres = VG_(do_syscall3)(SYS_read, fd, (UWord) &psinfo, sizeof(psinfo));
+   if (sr_isError(sres)) {
+      SET_STATUS_from_SysRes(sres);
+      VG_(close)(fd);
+      return True;
+   }
+   if (sr_Res(sres) != sizeof(psinfo)) {
+      SET_STATUS_Failure(VKI_ENODATA);
+      VG_(close)(fd);
+      return True;
+   }
+
+   VG_(close)(fd);
+
+   VG_(client_fname)(psinfo.pr_fname, sizeof(psinfo.pr_fname), True);
+   VG_(client_cmd_and_args)(psinfo.pr_psargs, sizeof(psinfo.pr_psargs));
+
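+   /* The initial client SP points at argc, immediately followed by the argv
+      pointer vector (standard SysV process-startup stack layout), so psinfo
+      can report the client's own argc/argv rather than the tool's. */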
+   Addr *ptr = (Addr *) VG_(get_initial_client_SP)();
+   psinfo.pr_argc = *ptr++;
+   psinfo.pr_argv = (Addr) ptr;
+
+   sres = VG_(do_syscall4)(SYS_pwrite, VG_(cl_psinfo_fd),
+                           (UWord) &psinfo, sizeof(psinfo), 0);
+   if (sr_isError(sres)) {
+      SET_STATUS_from_SysRes(sres);
+      return True;
+   }
+
+   sres = VG_(dup)(VG_(cl_psinfo_fd));
+   SET_STATUS_from_SysRes(sres);
+   if (!sr_isError(sres)) {
+      OffT off = VG_(lseek)(sr_Res(sres), 0, VKI_SEEK_SET);
+      if (off < 0)
+         SET_STATUS_Failure(VKI_EMFILE);
+   }
+
+   return True;
+}
+
+#if defined(SOLARIS_OLD_SYSCALLS)
+PRE(sys_open)
+{
+   /* int open(const char *filename, int flags);
+      int open(const char *filename, int flags, mode_t mode); */
+
+   if (ARG2 & VKI_O_CREAT) {
+      /* 3-arg version */
+      PRINT("sys_open ( %#lx(%s), %ld, %ld )", ARG1, (HChar *) ARG1,
+            ARG2, ARG3);
+      PRE_REG_READ3(long, "open", const char *, filename,
+                    int, flags, vki_mode_t, mode);
+   } else {
+      /* 2-arg version */
+      PRINT("sys_open ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, ARG2);
+      PRE_REG_READ2(long, "open", const char *, filename, int, flags);
+   }
+
+   PRE_MEM_RASCIIZ("open(filename)", ARG1);
+
+   if (ML_(handle_auxv_open)(status, (const HChar*)ARG1, ARG2))
+      return;
+
+   if (handle_psinfo_open(status, False /*use_openat*/, (const HChar*)ARG1, 0,
+                          ARG2, ARG3))
+      return;
+
+   *flags |= SfMayBlock;
+}
+
+POST(sys_open)
+{
+   if (!ML_(fd_allowed)(RES, "open", tid, True)) {
+      VG_(close)(RES);
+      SET_STATUS_Failure(VKI_EMFILE);
+   } else if (VG_(clo_track_fds))
+      ML_(record_fd_open_with_given_name)(tid, RES, (HChar *) ARG1);
+}
+#endif /* SOLARIS_OLD_SYSCALLS */
+
+PRE(sys_close)
+{
+   WRAPPER_PRE_NAME(generic, sys_close)(tid, layout, arrghs, status,
+                                        flags);
+}
+
+POST(sys_close)
+{
+   WRAPPER_POST_NAME(generic, sys_close)(tid, arrghs, status);
+   door_revoke(tid, ARG1);
+   /* Possibly an explicitly open'ed client door fd was just closed.
+      The generic sys_close wrapper calls this only if VG_(clo_track_fds) is
+      True. */
+   if (!VG_(clo_track_fds))
+      ML_(record_fd_close)(ARG1);
+}
+
+PRE(sys_linkat)
+{
+   /* int linkat(int fd1, const char *path1, int fd2,
+                 const char *path2, int flag);
+    */
+
+   /* Interpret the first and third arguments as 32-bit values even on a
+      64-bit architecture. This is different from Linux, for example, where
+      glibc sign-extends them. */
+   Int fd1 = (Int) ARG1;
+   Int fd2 = (Int) ARG3;
+
+   PRINT("sys_linkat ( %d, %#lx(%s), %d, %#lx(%s), %ld )",
+         fd1, ARG2, (HChar *) ARG2, fd2, ARG4, (HChar *) ARG4, ARG5);
+   PRE_REG_READ5(long, "linkat", int, fd1, const char *, path1,
+                 int, fd2, const char *, path2, int, flags);
+   PRE_MEM_RASCIIZ("linkat(path1)", ARG2);
+   PRE_MEM_RASCIIZ("linkat(path2)", ARG4);
+
+   /* Be strict but ignore fd1/fd2 for absolute path1/path2. */
+   if (fd1 != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(fd1, "linkat", tid, False)) {
+      SET_STATUS_Failure(VKI_EBADF);
+   }
+   if (fd2 != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG4, 1)
+       && ((HChar *) ARG4)[0] != '/'
+       && !ML_(fd_allowed)(fd2, "linkat", tid, False)) {
+      SET_STATUS_Failure(VKI_EBADF);
+   }
+
+   *flags |= SfMayBlock;
+}
+
+PRE(sys_symlinkat)
+{
+   /* int symlinkat(const char *path1, int fd, const char *path2); */
+
+   /* Interpret the second argument as a 32-bit value even on a 64-bit
+      architecture. This is different from Linux, for example, where glibc
+      sign-extends it. */
+   Int fd = (Int) ARG2;
+
+   PRINT("sys_symlinkat ( %#lx(%s), %d, %#lx(%s) )",
+         ARG1, (HChar *) ARG1, fd, ARG3, (HChar *) ARG3);
+   PRE_REG_READ3(long, "symlinkat", const char *, path1, int, fd,
+                 const char *, path2);
+   PRE_MEM_RASCIIZ("symlinkat(path1)", ARG1);
+   PRE_MEM_RASCIIZ("symlinkat(path2)", ARG3);
+
+   /* Be strict but ignore fd for absolute path2. */
+   if (fd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG3, 1)
+       && ((HChar *) ARG3)[0] != '/'
+       && !ML_(fd_allowed)(fd, "symlinkat", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+
+   *flags |= SfMayBlock;
+}
+
+PRE(sys_time)
+{
+   /* time_t time(); */
+   PRINT("sys_time ( )");
+   PRE_REG_READ0(long, "time");
+}
+
+/* Data segment for brk (heap).
+   Initial data segment is established during image initialization
+   (initimg-solaris.c). Notable facts:
+   - VG_(brk_base) is not page aligned; does not move
+   - VG_(brk_limit) moves between [VG_(brk_base), data segment end]
+   - data segment end is always page aligned
+   - right after the data segment end there is a 1-page reservation
+
+            |      heap           |
+     +------+------+--------------+-------+
+     | BSS  | anon |   anon       | resvn |
+     +------+------+--------------+-------+
+
+            ^      ^        ^    ^
+            |      |        |    |
+            |      |        |    data segment end
+            |      |        VG_(brk_limit) -- no alignment constraint
+            |      brk_base_pgup -- page aligned
+            VG_(brk_base) -- not page aligned -- does not move
+
+   Because VG_(brk_base) is not page-aligned and initially lies within the
+   pre-established data segment, special care has to be taken in the code
+   below to handle this.
+*/
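+
+/* Example (assuming 4 KiB pages): with VG_(brk_base) = 0x8050123,
+   brk_base_pgup is 0x8051000; a brk() request for an address below 0x8051000
+   therefore still falls inside the pre-established data segment and is
+   handled by the "fixup" paths below. */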
+
+PRE(sys_brk)
+{
+   /* unsigned long brk(caddr_t end_data_segment); */
+   /* The Solaris kernel returns 0 on success.
+      In addition to this, brk(0) returns the current data segment end.  This
+      is very different from the Linux kernel, for example. */
+
+   Addr old_brk_limit = VG_(brk_limit);
+   /* If VG_(brk_base) is page-aligned then old_brk_base_pgup is equal to
+      VG_(brk_base). */
+   Addr old_brk_base_pgup = VG_PGROUNDUP(VG_(brk_base));
+   Addr new_brk = ARG1;
+   const NSegment *seg, *seg2;
+
+   PRINT("sys_brk ( %#lx )", ARG1);
+   PRE_REG_READ1(unsigned long, "brk", vki_caddr_t, end_data_segment);
+
+   if (!new_brk) {
+      /* brk(0) - specific to Solaris 11 only. */
+      SET_STATUS_Success(old_brk_limit);
+      return;
+   }
+
+   /* Handle some trivial cases. */
+   if (new_brk == old_brk_limit) {
+      SET_STATUS_Success(0);
+      return;
+   }
+   if (new_brk < VG_(brk_base)) {
+      /* Clearly impossible. */
+      SET_STATUS_Failure(VKI_ENOMEM);
+      return;
+   }
+   if (new_brk - VG_(brk_base) > VG_(client_rlimit_data).rlim_cur) {
+      SET_STATUS_Failure(VKI_ENOMEM);
+      return;
+   }
+
+   if (new_brk < old_brk_limit) {
+      /* Shrinking the data segment.  Be lazy and don't munmap the excess
+         area. */
+      if (old_brk_limit > old_brk_base_pgup) {
+         /* Calculate new local brk (=MAX(new_brk, old_brk_base_pgup)). */
+         Addr new_brk_local;
+         if (new_brk < old_brk_base_pgup)
+            new_brk_local = old_brk_base_pgup;
+         else
+            new_brk_local = new_brk;
+
+         /* Find a segment at the beginning and at the end of the shrunk
+            range. */
+         seg = VG_(am_find_nsegment)(new_brk_local);
+         seg2 = VG_(am_find_nsegment)(old_brk_limit - 1);
+         vg_assert(seg);
+         vg_assert(seg->kind == SkAnonC);
+         vg_assert(seg2);
+         vg_assert(seg == seg2);
+
+         /* Discard any translations and zero-out the area. */
+         if (seg->hasT)
+            VG_(discard_translations)(new_brk_local,
+                                      old_brk_limit - new_brk_local,
+                                      "do_brk(shrink)");
+         /* Since we're being lazy and not unmapping pages, we have to zero
+            out the area, so that if the area later comes back into
+            circulation, it will be filled with zeroes, as if it really had
+            been unmapped and later remapped.  Be a bit paranoid and try hard
+            to ensure we're not going to segfault by doing the write - check
+            that the segment is writable. */
+         if (seg->hasW)
+            VG_(memset)((void*)new_brk_local, 0, old_brk_limit - new_brk_local);
+      }
+
+      /* Fixup code if the VG_(brk_base) is not page-aligned. */
+      if (new_brk < old_brk_base_pgup) {
+         /* Calculate old local brk (=MIN(old_brk_limit, old_brk_base_pgup)). */
+         Addr old_brk_local;
+         if (old_brk_limit < old_brk_base_pgup)
+            old_brk_local = old_brk_limit;
+         else
+            old_brk_local = old_brk_base_pgup;
+
+         /* Find a segment at the beginning and at the end of the shrunk
+            range. */
+         seg = VG_(am_find_nsegment)(new_brk);
+         seg2 = VG_(am_find_nsegment)(old_brk_local - 1);
+         vg_assert(seg);
+         vg_assert(seg2);
+         vg_assert(seg == seg2);
+
+         /* Discard any translations and zero-out the area. */
+         if (seg->hasT)
+            VG_(discard_translations)(new_brk, old_brk_local - new_brk,
+                                      "do_brk(shrink)");
+         if (seg->hasW)
+            VG_(memset)((void*)new_brk, 0, old_brk_local - new_brk);
+      }
+
+      /* We are done, update VG_(brk_limit), tell the tool about the changes,
+         and leave. */
+      VG_(brk_limit) = new_brk;
+      VG_TRACK(die_mem_brk, new_brk, old_brk_limit - new_brk);
+      SET_STATUS_Success(0);
+      return;
+   }
+
+   /* We are expanding the brk segment. */
+
+   /* Fixup code if the VG_(brk_base) is not page-aligned. */
+   if (old_brk_limit < old_brk_base_pgup) {
+      /* Calculate new local brk (=MIN(new_brk, old_brk_base_pgup)). */
+      Addr new_brk_local;
+      if (new_brk < old_brk_base_pgup)
+         new_brk_local = new_brk;
+      else
+         new_brk_local = old_brk_base_pgup;
+
+      /* Find a segment at the beginning and at the end of the expanded
+         range. */
+      seg = VG_(am_find_nsegment)(old_brk_limit);
+      seg2 = VG_(am_find_nsegment)(new_brk_local - 1);
+      vg_assert(seg);
+      vg_assert(seg2);
+      vg_assert(seg == seg2);
+
+      /* Nothing else to do. */
+   }
+
+   if (new_brk > old_brk_base_pgup) {
+      /* Calculate old local brk (=MAX(old_brk_limit, old_brk_base_pgup)). */
+      Addr old_brk_local;
+      if (old_brk_limit < old_brk_base_pgup)
+         old_brk_local = old_brk_base_pgup;
+      else
+         old_brk_local = old_brk_limit;
+
+      /* Find a segment at the beginning of the expanded range. */
+      if (old_brk_local > old_brk_base_pgup)
+         seg = VG_(am_find_nsegment)(old_brk_local - 1);
+      else
+         seg = VG_(am_find_nsegment)(old_brk_local);
+      vg_assert(seg);
+      vg_assert(seg->kind == SkAnonC);
+
+      /* Find the 1-page reservation segment. */
+      seg2 = VG_(am_next_nsegment)(seg, True/*forwards*/);
+      vg_assert(seg2);
+      vg_assert(seg2->kind == SkResvn);
+      vg_assert(seg->end + 1 == seg2->start);
+      vg_assert(seg2->end - seg2->start + 1 == VKI_PAGE_SIZE);
+
+      if (new_brk <= seg2->start) {
+         /* Still fits within the existing anon segment, nothing to do. */
+      } else {
+         /* Data segment limit was already checked. */
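+         /* Growing past the current anon segment: re-create the 1-page
+            reservation just above the (page-rounded) new break, then map the
+            gap in between as client anon memory; the address space manager
+            merges it with the existing heap segment. */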
+         Addr anon_start = seg->end + 1;
+         Addr resvn_start = VG_PGROUNDUP(new_brk);
+         SizeT anon_size = resvn_start - anon_start;
+         SizeT resvn_size = VKI_PAGE_SIZE;
+         SysRes sres;
+
+         vg_assert(VG_IS_PAGE_ALIGNED(anon_size));
+         vg_assert(VG_IS_PAGE_ALIGNED(resvn_size));
+         vg_assert(VG_IS_PAGE_ALIGNED(anon_start));
+         vg_assert(VG_IS_PAGE_ALIGNED(resvn_start));
+         vg_assert(anon_size > 0);
+
+         /* The address space manager checks for free address space for us;
+            the reservation would not be created otherwise. */
+         Bool ok = VG_(am_create_reservation)(resvn_start, resvn_size, SmLower,
+                                              anon_size);
+         if (!ok) {
+            VG_(umsg)("brk segment overflow in thread #%d: can't grow "
+                      "to %#lx\n", tid, new_brk);
+            SET_STATUS_Failure(VKI_ENOMEM);
+            return;
+         }
+
+         /* Establish protection from the existing segment. */
+         UInt prot = (seg->hasR ? VKI_PROT_READ : 0)
+                     | (seg->hasW ? VKI_PROT_WRITE : 0)
+                     | (seg->hasX ? VKI_PROT_EXEC : 0);
+
+         /* Address space manager will merge old and new data segments. */
+         sres = VG_(am_mmap_anon_fixed_client)(anon_start, anon_size, prot);
+         if (sr_isError(sres)) {
+            VG_(umsg)("Cannot map memory to grow brk segment in thread #%d "
+                      "to %#lx\n", tid, new_brk);
+            SET_STATUS_Failure(VKI_ENOMEM);
+            return;
+         }
+         vg_assert(sr_Res(sres) == anon_start);
+
+         seg = VG_(am_find_nsegment)(old_brk_base_pgup);
+         seg2 = VG_(am_find_nsegment)(VG_PGROUNDUP(new_brk) - 1);
+         vg_assert(seg);
+         vg_assert(seg2);
+         vg_assert(seg == seg2);
+         vg_assert(new_brk <= seg->end + 1);
+      }
+   }
+
+   /* We are done, update VG_(brk_limit), tell the tool about the changes, and
+      leave. */
+   VG_(brk_limit) = new_brk;
+   VG_TRACK(new_mem_brk, old_brk_limit, new_brk - old_brk_limit, tid);
+   SET_STATUS_Success(0);
+}
+
+PRE(sys_stat)
+{
+   /* int stat(const char *path, struct stat *buf); */
+   /* Note: We could use the generic sys_newstat wrapper here, but the 'new'
+      in its name is rather confusing in the Solaris context, so we provide
+      our own wrapper. */
+   PRINT("sys_stat ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
+   PRE_REG_READ2(long, "stat", const char *, path, struct stat *, buf);
+
+   PRE_MEM_RASCIIZ("stat(path)", ARG1);
+   PRE_MEM_WRITE("stat(buf)", ARG2, sizeof(struct vki_stat));
+}
+
+POST(sys_stat)
+{
+   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
+}
+
+PRE(sys_lseek)
+{
+   /* off_t lseek(int fildes, off_t offset, int whence); */
+   PRINT("sys_lseek ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "lseek", int, fildes, vki_off_t, offset, int, whence);
+
+   /* Stay sane. */
+   if (!ML_(fd_allowed)(ARG1, "lseek", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_mount)
+{
+   /* int mount(const char *spec, const char *dir, int mflag, char *fstype,
+                char *dataptr, int datalen, char *optptr, int optlen); */
+   *flags |= SfMayBlock;
+   if (ARG3 & VKI_MS_OPTIONSTR) {
+      /* 8-argument mount */
+      PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s), %#lx, %ld, "
+            "%#lx(%s), %ld )", ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2, ARG3,
+            ARG4, (HChar *) ARG4, ARG5, ARG6, ARG7, (HChar *) ARG7, ARG8);
+      PRE_REG_READ8(long, "mount", const char *, spec, const char *, dir,
+                    int, mflag, char *, fstype, char *, dataptr, int, datalen,
+                    char *, optptr, int, optlen);
+   }
+   else if (ARG3 & VKI_MS_DATA) {
+      /* 6-argument mount */
+      PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s), %#lx, %ld )",
+            ARG1, (HChar *) ARG1, ARG2, (HChar *) ARG2, ARG3, ARG4,
+            (HChar *) ARG4, ARG5, ARG6);
+      PRE_REG_READ6(long, "mount", const char *, spec, const char *, dir,
+                    int, mflag, char *, fstype, char *, dataptr,
+                    int, datalen);
+   }
+   else {
+      /* 4-argument mount */
+      PRINT("sys_mount ( %#lx(%s), %#lx(%s), %ld, %#lx(%s) )", ARG1,
+            (HChar *) ARG1, ARG2, (HChar *) ARG2, ARG3, ARG4, (HChar *) ARG4);
+      PRE_REG_READ4(long, "mount", const char *, spec, const char *, dir,
+                    int, mflag, char *, fstype);
+   }
+   if (ARG1)
+      PRE_MEM_RASCIIZ("mount(spec)", ARG1);
+   PRE_MEM_RASCIIZ("mount(dir)", ARG2);
+   if (ARG4 && ARG4 >= 256) {
+      /* If ARG4 < 256, then it's an index to a fs table in the kernel. */
+      PRE_MEM_RASCIIZ("mount(fstype)", ARG4);
+   }
+   if (ARG3 & (VKI_MS_DATA | VKI_MS_OPTIONSTR)) {
+      if (ARG5)
+         PRE_MEM_READ("mount(dataptr)", ARG5, ARG6);
+      if ((ARG3 & VKI_MS_OPTIONSTR) && ARG7) {
+         /* in/out buffer */
+         PRE_MEM_RASCIIZ("mount(optptr)", ARG7);
+         PRE_MEM_WRITE("mount(optptr)", ARG7, ARG8);
+      }
+   }
+}
+
+POST(sys_mount)
+{
+   if (ARG3 & VKI_MS_OPTIONSTR) {
+      POST_MEM_WRITE(ARG7, VG_(strlen)((HChar*)ARG7) + 1);
+   } else if (ARG3 & VKI_MS_DATA) {
+      if ((ARG2) &&
+          (ARG3 & VKI_MS_NOMNTTAB) &&
+          (VG_STREQ((HChar *) ARG4, "namefs")) &&
+          (ARG6 == sizeof(struct vki_namefd)) &&
+          ML_(safe_to_deref)((void *) ARG5, ARG6)) {
+         /* Most likely an fattach() call for a door file descriptor. */
+         door_server_fattach(((struct vki_namefd *) ARG5)->fd, (HChar *) ARG2);
+      }
+   }
+}
+
+PRE(sys_readlinkat)
+{
+   /* ssize_t readlinkat(int dfd, const char *path, char *buf,
+                         size_t bufsiz); */
+   HChar name[30];    // large enough
+   Word saved = SYSNO;
+
+   /* Interpret the first argument as a 32-bit value even on a 64-bit
+      architecture. This is different from Linux, for example, where glibc
+      sign-extends it. */
+   Int dfd = (Int) ARG1;
+
+   PRINT("sys_readlinkat ( %d, %#lx(%s), %#lx, %lu )", dfd, ARG2,
+         (HChar *) ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "readlinkat", int, dfd, const char *, path,
+                 char *, buf, int, bufsiz);
+   PRE_MEM_RASCIIZ("readlinkat(path)", ARG2);
+   PRE_MEM_WRITE("readlinkat(buf)", ARG3, ARG4);
+
+   /* Be strict but ignore dfd for absolute path. */
+   if (dfd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(dfd, "readlinkat", tid, False)) {
+      SET_STATUS_Failure(VKI_EBADF);
+      return;
+   }
+
+   /* Handle the case where readlinkat is looking at /proc/self/path/a.out or
+      /proc/<pid>/path/a.out. */
+   VG_(sprintf)(name, "/proc/%d/path/a.out", VG_(getpid)());
+   if (ML_(safe_to_deref)((void*)ARG2, 1) &&
+       (!VG_(strcmp)((HChar*)ARG2, name) ||
+        !VG_(strcmp)((HChar*)ARG2, "/proc/self/path/a.out"))) {
+      VG_(sprintf)(name, "/proc/self/path/%d", VG_(cl_exec_fd));
+      SET_STATUS_from_SysRes(VG_(do_syscall4)(saved, dfd, (UWord)name, ARG3,
+                                              ARG4));
+   }
+}
+
+POST(sys_readlinkat)
+{
+   POST_MEM_WRITE(ARG3, RES);
+}
+
+PRE(sys_stime)
+{
+   /* Kernel: int stime(time_t time); */
+   PRINT("sys_stime ( %ld )", ARG1);
+   PRE_REG_READ1(long, "stime", vki_time_t, time);
+}
+
+PRE(sys_fstat)
+{
+   /* int fstat(int fildes, struct stat *buf); */
+   /* Note: We could use the generic sys_newfstat wrapper here, but the 'new'
+      in its name is rather confusing in the Solaris context, so we provide
+      our own wrapper. */
+   PRINT("sys_fstat ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "fstat", int, fildes, struct stat *, buf);
+   PRE_MEM_WRITE("fstat(buf)", ARG2, sizeof(struct vki_stat));
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "fstat", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_fstat)
+{
+   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
+}
+
+#if defined(SOLARIS_FREALPATHAT_SYSCALL)
+PRE(sys_frealpathat)
+{
+   /* int frealpathat(int fd, char *path, char *buf, size_t buflen); */
+
+   /* Interpret the first argument as a 32-bit value even on a 64-bit
+      architecture. This is different from Linux, for example, where glibc
+      sign-extends it. */
+   Int fd = (Int) ARG1;
+
+   PRINT("sys_frealpathat ( %d, %#lx(%s), %#lx, %ld )",
+         fd, ARG2, (HChar *) ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "frealpathat", int, fd, char *, path,
+                 char *, buf, vki_size_t, buflen);
+   PRE_MEM_RASCIIZ("frealpathat(path)", ARG2);
+   PRE_MEM_WRITE("frealpathat(buf)", ARG3, ARG4);
+
+   /* Be strict but ignore fd for absolute path. */
+   if (fd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(fd, "frealpathat", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_frealpathat)
+{
+   POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
+}
+#endif /* SOLARIS_FREALPATHAT_SYSCALL */
+
+PRE(sys_stty)
+{
+   /* int stty(int fd, const struct sgttyb *tty); */
+   PRINT("sys_stty ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "stty", int, fd,
+                 const struct vki_sgttyb *, tty);
+   PRE_MEM_READ("stty(tty)", ARG2, sizeof(struct vki_sgttyb));
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "stty", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_gtty)
+{
+   /* int gtty(int fd, struct sgttyb *tty); */
+   PRINT("sys_gtty ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "gtty", int, fd, struct vki_sgttyb *, tty);
+   PRE_MEM_WRITE("gtty(tty)", ARG2, sizeof(struct vki_sgttyb));
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "gtty", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_gtty)
+{
+   POST_MEM_WRITE(ARG2, sizeof(struct vki_sgttyb));
+}
+
+PRE(sys_pgrpsys)
+{
+   /* Kernel: int setpgrp(int flag, int pid, int pgid); */
+   switch (ARG1 /*flag*/) {
+   case 0:
+      /* Libc: pid_t getpgrp(void); */
+      PRINT("sys_pgrpsys ( %ld )", ARG1);
+      PRE_REG_READ1(long, SC2("pgrpsys", "getpgrp"), int, flag);
+      break;
+   case 1:
+      /* Libc: pid_t setpgrp(void); */
+      PRINT("sys_pgrpsys ( %ld )", ARG1);
+      PRE_REG_READ1(long, SC2("pgrpsys", "setpgrp"), int, flag);
+      break;
+   case 2:
+      /* Libc: pid_t getsid(pid_t pid); */
+      PRINT("sys_pgrpsys ( %ld, %ld )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("pgrpsys", "getsid"), int, flag,
+                    vki_pid_t, pid);
+      break;
+   case 3:
+      /* Libc: pid_t setsid(void); */
+      PRINT("sys_pgrpsys ( %ld )", ARG1);
+      PRE_REG_READ1(long, SC2("pgrpsys", "setsid"), int, flag);
+      break;
+   case 4:
+      /* Libc: pid_t getpgid(pid_t pid); */
+      PRINT("sys_pgrpsys ( %ld, %ld )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("pgrpsys", "getpgid"), int, flag,
+                    vki_pid_t, pid);
+      break;
+   case 5:
+      /* Libc: int setpgid(pid_t pid, pid_t pgid); */
+      PRINT("sys_pgrpsys ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("pgrpsys", "setpgid"), int, flag,
+                    vki_pid_t, pid, vki_pid_t, pgid);
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the pgrpsys call with flag %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+PRE(sys_pipe)
+{
+#if defined(SOLARIS_NEW_PIPE_SYSCALL)
+   /* int pipe(int fildes[2], int flags); */
+   PRINT("sys_pipe ( %#lx, %ld )", ARG1, ARG2);
+   PRE_REG_READ2(long, "pipe", int *, fildes, int, flags);
+   PRE_MEM_WRITE("pipe(fildes)", ARG1, 2 * sizeof(int));
+#else
+   /* longlong_t pipe(); */
+   PRINT("sys_pipe ( )");
+   PRE_REG_READ0(long, "pipe");
+#endif /* SOLARIS_NEW_PIPE_SYSCALL */
+}
+
+POST(sys_pipe)
+{
+   Int p0, p1;
+
+#if defined(SOLARIS_NEW_PIPE_SYSCALL)
+   int *fds = (int*)ARG1;
+   p0 = fds[0];
+   p1 = fds[1];
+   POST_MEM_WRITE(ARG1, 2 * sizeof(int));
+#else
+   p0 = RES;
+   p1 = RESHI;
+#endif /* SOLARIS_NEW_PIPE_SYSCALL */
+
+   if (!ML_(fd_allowed)(p0, "pipe", tid, True) ||
+       !ML_(fd_allowed)(p1, "pipe", tid, True)) {
+      VG_(close)(p0);
+      VG_(close)(p1);
+      SET_STATUS_Failure(VKI_EMFILE);
+   }
+   else if (VG_(clo_track_fds)) {
+      ML_(record_fd_open_nameless)(tid, p0);
+      ML_(record_fd_open_nameless)(tid, p1);
+   }
+}
+
+PRE(sys_faccessat)
+{
+   /* int faccessat(int fd, const char *path, int amode, int flag); */
+
+   /* Interpret the first argument as a 32-bit value even on a 64-bit
+      architecture. This is different from Linux, for example, where glibc
+      sign-extends it. */
+   Int fd = (Int) ARG1;
+
+   PRINT("sys_faccessat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2,
+         (HChar *) ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "faccessat", int, fd, const char *, path,
+                 int, amode, int, flag);
+   PRE_MEM_RASCIIZ("faccessat(path)", ARG2);
+
+   /* Be strict but ignore fd for absolute path. */
+   if (fd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(fd, "faccessat", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_mknodat)
+{
+   /* int mknodat(int fd, char *fname, mode_t fmode, dev_t dev); */
+
+   /* Interpret the first argument as a 32-bit value even on a 64-bit
+      architecture. This is different from Linux, for example, where glibc
+      sign-extends it. */
+   Int fd = (Int) ARG1;
+
+   PRINT("sys_mknodat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2,
+         (HChar *) ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "mknodat", int, fd, const char *, fname,
+                 vki_mode_t, fmode, vki_dev_t, dev);
+   PRE_MEM_RASCIIZ("mknodat(fname)", ARG2);
+
+   /* Be strict but ignore fd for absolute path. */
+   if (fd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(fd, "mknodat", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+
+   *flags |= SfMayBlock;
+}
+
+POST(sys_mknodat)
+{
+   if (!ML_(fd_allowed)(RES, "mknodat", tid, True)) {
+      VG_(close)(RES);
+      SET_STATUS_Failure(VKI_EMFILE);
+   } else if (VG_(clo_track_fds))
+      ML_(record_fd_open_with_given_name)(tid, RES, (HChar *) ARG2);
+}
+
+PRE(sys_sysi86)
+{
+   /* int sysi86(int cmd, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3); */
+   PRINT("sys_sysi86 ( %ld, %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "sysi86", int, cmd, uintptr_t, arg1, uintptr_t, arg2,
+                 uintptr_t, arg3);
+
+   switch (ARG1 /*cmd*/) {
+   case VKI_SI86FPSTART:
+      PRE_MEM_WRITE("sysi86(fp_hw)", ARG2, sizeof(vki_uint_t));
+      /* ARG3 is a desired x87 FCW value, ARG4 is a desired SSE MXCSR value.
+         They are passed to the kernel but V will change them later anyway
+         (this is a general Valgrind limitation described in the official
+         documentation). */
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the sysi86 call with cmd %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_sysi86)
+{
+   switch (ARG1 /*cmd*/) {
+   case VKI_SI86FPSTART:
+      POST_MEM_WRITE(ARG2, sizeof(vki_uint_t));
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_shmsys)
+{
+   /* Kernel: uintptr_t shmsys(int opcode, uintptr_t a0, uintptr_t a1,
+                               uintptr_t a2, uintptr_t a3);
+    */
+   *flags |= SfMayBlock;
+
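+   /* shmsys() multiplexes all of libc's shm*() calls through a single
+      syscall; dispatch on the opcode so that each variant gets its own
+      register-read annotations and memory checks. */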
+   switch (ARG1 /*opcode*/) {
+   case VKI_SHMAT:
+      /* Libc: void *shmat(int shmid, const void *shmaddr, int shmflg); */
+      PRINT("sys_shmsys ( %ld, %ld, %#lx, %ld )",
+            ARG1, ARG2, ARG3, ARG4);
+      PRE_REG_READ4(long, SC2("shmsys", "shmat"), int, opcode,
+                    int, shmid, const void *, shmaddr, int, shmflg);
+
+      UWord addr = ML_(generic_PRE_sys_shmat)(tid, ARG2, ARG3, ARG4);
+      if (addr == 0)
+         SET_STATUS_Failure(VKI_EINVAL);
+      else
+         ARG3 = addr;
+      break;
+
+   case VKI_SHMCTL:
+      /* Libc: int shmctl(int shmid, int cmd, struct shmid_ds *buf); */
+      switch (ARG3 /* cmd */) {
+      case VKI_SHM_LOCK:
+         PRINT("sys_shmsys ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
+         PRE_REG_READ3(long, SC3("shmsys", "shmctl", "lock"),
+                       int, opcode, int, shmid, int, cmd);
+         break;
+      case VKI_SHM_UNLOCK:
+         PRINT("sys_shmsys ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
+         PRE_REG_READ3(long, SC3("shmsys", "shmctl", "unlock"),
+                       int, opcode, int, shmid, int, cmd);
+         break;
+      case VKI_IPC_RMID:
+         PRINT("sys_shmsys ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
+         PRE_REG_READ3(long, SC3("shmsys", "shmctl", "rmid"),
+                       int, opcode, int, shmid, int, cmd);
+         break;
+      case VKI_IPC_SET:
+         PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
+               ARG1, ARG2, ARG3, ARG4);
+         PRE_REG_READ4(long, SC3("shmsys", "shmctl", "set"),
+                       int, opcode, int, shmid, int, cmd,
+                       struct vki_shmid_ds *, buf);
+
+         struct vki_shmid_ds *buf = (struct vki_shmid_ds *) ARG4;
+         PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.uid)",
+                        buf->shm_perm.uid);
+         PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.gid)",
+                        buf->shm_perm.gid);
+         PRE_FIELD_READ("shmsys(shmctl, ipc_set, buf->shm_perm.mode)",
+                        buf->shm_perm.mode);
+         break;
+      case VKI_IPC_STAT:
+         PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
+               ARG1, ARG2, ARG3, ARG4);
+         PRE_REG_READ4(long, SC3("shmsys", "shmctl", "stat"),
+                       int, opcode, int, shmid, int, cmd,
+                       struct vki_shmid_ds *, buf);
+         PRE_MEM_WRITE("shmsys(shmctl, ipc_stat, buf)", ARG4,
+                       sizeof(struct vki_shmid_ds));
+         break;
+      case VKI_IPC_SET64:
+         PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
+               ARG1, ARG2, ARG3, ARG4);
+         PRE_REG_READ4(long, SC3("shmsys", "shmctl", "set64"),
+                       int, opcode, int, shmid, int, cmd,
+                       struct vki_shmid_ds64 *, buf);
+
+         struct vki_shmid_ds64 *buf64 = (struct vki_shmid_ds64 *) ARG4;
+         PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
+                        "buf->shmx_perm.ipcx_uid)",
+                        buf64->shmx_perm.ipcx_uid);
+         PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
+                        "buf->shmx_perm.ipcx_gid)",
+                        buf64->shmx_perm.ipcx_gid);
+         PRE_FIELD_READ("shmsys(shmctl, ipc_set64, "
+                        "buf->shmx_perm.ipcx_mode)",
+                        buf64->shmx_perm.ipcx_mode);
+         break;
+      case VKI_IPC_STAT64:
+         PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
+               ARG1, ARG2, ARG3, ARG4);
+         PRE_REG_READ4(long, SC3("shmsys", "shmctl", "stat64"),
+                       int, opcode, int, shmid, int, cmd,
+                       struct vki_shmid_ds64 *, buf);
+         PRE_MEM_WRITE("shmsys(shmctl, ipc_stat64, buf)", ARG4,
+                       sizeof(struct vki_shmid_ds64));
+         break;
+#if defined(SOLARIS_SHM_NEW)
+      case VKI_IPC_XSTAT64:
+         PRINT("sys_shmsys ( %ld, %ld, %ld, %#lx )",
+               ARG1, ARG2, ARG3, ARG4);
+         PRE_REG_READ4(long, SC3("shmsys", "shmctl", "xstat64"),
+                       int, opcode, int, shmid, int, cmd,
+                       struct vki_shmid_ds64 *, buf);
+         PRE_MEM_WRITE("shmsys(shmctl, ipc_xstat64, buf)", ARG4,
+                       sizeof(struct vki_shmid_xds64));
+         break;
+#endif /* SOLARIS_SHM_NEW */
+      default:
+         VG_(unimplemented)("Syswrap of the shmsys(shmctl) call with "
+                            "cmd %ld.", ARG3);
+         /*NOTREACHED*/
+         break;
+      }
+      break;
+
+   case VKI_SHMDT:
+      /* Libc: int shmdt(const void *shmaddr); */
+      PRINT("sys_shmsys ( %ld, %#lx )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("shmsys", "shmdt"), int, opcode,
+                    const void *, shmaddr);
+
+      if (!ML_(generic_PRE_sys_shmdt)(tid, ARG2))
+         SET_STATUS_Failure(VKI_EINVAL);
+      break;
+
+   case VKI_SHMGET:
+      /* Libc: int shmget(key_t key, size_t size, int shmflg); */
+      PRINT("sys_shmsys ( %ld, %ld, %ld, %ld )",
+            ARG1, ARG2, ARG3, ARG4);
+      PRE_REG_READ4(long, SC2("shmsys", "shmget"), int, opcode,
+                    vki_key_t, key, vki_size_t, size, int, shmflg);
+      break;
+
+   case VKI_SHMIDS:
+      /* Libc: int shmids(int *buf, uint_t nids, uint_t *pnids); */
+      PRINT("sys_shmsys ( %ld, %#lx, %ld, %#lx )",
+            ARG1, ARG2, ARG3, ARG4);
+      PRE_REG_READ4(long, SC2("shmsys", "shmids"), int, opcode,
+                    int *, buf, vki_uint_t, nids, vki_uint_t *, pnids);
+
+      PRE_MEM_WRITE("shmsys(shmids, buf)", ARG2, ARG3 * sizeof(int *));
+      PRE_MEM_WRITE("shmsys(shmids, pnids)", ARG4, sizeof(vki_uint_t));
+      break;
+
+#if defined(SOLARIS_SHM_NEW)
+   case VKI_SHMADV:
+      /* Libc: int shmadv(int shmid, uint_t cmd, uint_t *advice); */
+      PRINT("sys_shmsys ( %ld, %ld, %ld, %ld )",
+            ARG1, ARG2, ARG3, ARG4);
+      PRE_REG_READ4(long, SC2("shmsys", "shmadv"), int, opcode,
+                    int, shmid, vki_uint_t, cmd, vki_uint_t *, advice);
+
+      switch (ARG3 /*cmd*/) {
+      case VKI_SHM_ADV_GET:
+         PRE_MEM_WRITE("shmsys(shmadv, advice)", ARG4,
+                       sizeof(vki_uint_t));
+         break;
+      case VKI_SHM_ADV_SET:
+         PRE_MEM_READ("shmsys(shmadv, advice)", ARG4,
+                       sizeof(vki_uint_t));
+         break;
+      default:
+         VG_(unimplemented)("Syswrap of the shmsys(shmadv) call with "
+                            "cmd %ld.", ARG3);
+         /*NOTREACHED*/
+         break;
+      }
+      break;
+
+   case VKI_SHMGET_OSM:
+      /* Libc: int shmget_osm(key_t key, size_t size, int shmflg,
+                              size_t granule_sz);
+       */
+      PRINT("sys_shmsys ( %ld, %ld, %ld, %ld, %ld )",
+            ARG1, ARG2, ARG3, ARG4, ARG5);
+      PRE_REG_READ5(long, SC2("shmsys", "shmget_osm"), int, opcode,
+                    vki_key_t, key, vki_size_t, size, int, shmflg,
+                    vki_size_t, granule_sz);
+      break;
+#endif /* SOLARIS_SHM_NEW */
+
+   default:
+      VG_(unimplemented)("Syswrap of the shmsys call with opcode %ld.",
+                         ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_shmsys)
+{
+   switch (ARG1 /*opcode*/) {
+   case VKI_SHMAT:
+      ML_(generic_POST_sys_shmat)(tid, RES, ARG2, ARG3, ARG4);
+      break;
+
+   case VKI_SHMCTL:
+      switch (ARG3 /*cmd*/) {
+      case VKI_SHM_LOCK:
+      case VKI_SHM_UNLOCK:
+      case VKI_IPC_RMID:
+      case VKI_IPC_SET:
+         break;
+      case VKI_IPC_STAT:
+         POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_ds));
+         break;
+      case VKI_IPC_SET64:
+         break;
+      case VKI_IPC_STAT64:
+         POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_ds64));
+         break;
+#if defined(SOLARIS_SHM_NEW)
+      case VKI_IPC_XSTAT64:
+         POST_MEM_WRITE(ARG4, sizeof(struct vki_shmid_xds64));
+         break;
+#endif /* SOLARIS_SHM_NEW */
+      default:
+         vg_assert(0);
+         break;
+      }
+      break;
+
+   case VKI_SHMDT:
+      ML_(generic_POST_sys_shmdt)(tid, RES, ARG2);
+      break;
+
+   case VKI_SHMGET:
+      break;
+
+   case VKI_SHMIDS:
+      {
+         POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
+
+         vki_uint_t *pnids = (vki_uint_t *) ARG4;
+         if (*pnids <= ARG3)
+            POST_MEM_WRITE(ARG2, *pnids * sizeof(int *));
+      }
+      break;
+
+#if defined(SOLARIS_SHM_NEW)
+   case VKI_SHMADV:
+      switch (ARG3 /*cmd*/) {
+      case VKI_SHM_ADV_GET:
+         POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
+         break;
+      case VKI_SHM_ADV_SET:
+         break;
+      default:
+         vg_assert(0);
+         break;
+      }
+      break;
+
+   case VKI_SHMGET_OSM:
+      break;
+#endif /* SOLARIS_SHM_NEW */
+
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_semsys)
+{
+   /* Kernel: int semsys(int opcode, uintptr_t a1, uintptr_t a2, uintptr_t a3,
+                         uintptr_t a4);
+    */
+   *flags |= SfMayBlock;
+
+   switch (ARG1 /*opcode*/) {
+   case VKI_SEMCTL:
+      /* Libc: int semctl(int semid, int semnum, int cmd...); */
+      switch (ARG4) {
+         case VKI_IPC_STAT:
+            PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
+                  ARG1, ARG2, ARG3, ARG4, ARG5);
+            PRE_REG_READ5(long, SC3("semsys", "semctl", "stat"), int, opcode,
+                          int, semid, int, semnum, int, cmd,
+                          struct vki_semid_ds *, arg);
+            break;
+         case VKI_IPC_SET:
+            PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
+                  ARG1, ARG2, ARG3, ARG4, ARG5);
+            PRE_REG_READ5(long, SC3("semsys", "semctl", "set"), int, opcode,
+                          int, semid, int, semnum, int, cmd,
+                          struct vki_semid_ds *, arg);
+            break;
+         case VKI_IPC_STAT64:
+            PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
+                  ARG1, ARG2, ARG3, ARG4, ARG5);
+            PRE_REG_READ5(long, SC3("semsys", "semctl", "stat64"), int, opcode,
+                          int, semid, int, semnum, int, cmd,
+                          struct vki_semid64_ds *, arg);
+            break;
+         case VKI_IPC_SET64:
+            PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
+                  ARG1, ARG2, ARG3, ARG4, ARG5);
+            PRE_REG_READ5(long, SC3("semsys", "semctl", "set64"), int, opcode,
+                          int, semid, int, semnum, int, cmd,
+                          struct vki_semid64_ds *, arg);
+            break;
+         case VKI_IPC_RMID:
+            PRINT("sys_semsys ( %ld, %ld, %ld )", ARG1, ARG3, ARG4);
+            PRE_REG_READ3(long, SC3("semsys", "semctl", "rmid"), int, opcode,
+                          int, semid, int, cmd);
+            break;
+         case VKI_GETALL:
+            PRINT("sys_semsys ( %ld, %ld, %ld, %#lx )",
+                  ARG1, ARG2, ARG4, ARG5);
+            PRE_REG_READ4(long, SC3("semsys", "semctl", "getall"), int, opcode,
+                          int, semid, int, cmd, ushort_t *, arg);
+            break;
+         case VKI_SETALL:
+            PRINT("sys_semsys ( %ld, %ld, %ld, %#lx )",
+                  ARG1, ARG2, ARG4, ARG5);
+            PRE_REG_READ4(long, SC3("semsys", "semctl", "setall"), int, opcode,
+                          int, semid, int, cmd, ushort_t *, arg);
+            break;
+         case VKI_GETVAL:
+            PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
+                  ARG1, ARG2, ARG3, ARG4);
+            PRE_REG_READ4(long, SC3("semsys", "semctl", "getval"), int, opcode,
+                          int, semid, int, semnum, int, cmd);
+            break;
+         case VKI_SETVAL:
+            PRINT("sys_semsys ( %ld, %ld, %ld, %ld, %#lx )",
+                  ARG1, ARG2, ARG3, ARG4, ARG5);
+            PRE_REG_READ5(long, SC3("semsys", "semctl", "setval"), int, opcode,
+                          int, semid, int, semnum, int, cmd,
+                          union vki_semun *, arg);
+            break;
+         case VKI_GETPID:
+            PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
+                  ARG1, ARG2, ARG3, ARG4);
+            PRE_REG_READ4(long, SC3("semsys", "semctl", "getpid"), int, opcode,
+                          int, semid, int, semnum, int, cmd);
+            break;
+         case VKI_GETNCNT:
+            PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
+                  ARG1, ARG2, ARG3, ARG4);
+            PRE_REG_READ4(long, SC3("semsys", "semctl", "getncnt"),
+                          int, opcode, int, semid, int, semnum, int, cmd);
+            break;
+         case VKI_GETZCNT:
+            PRINT("sys_semsys ( %ld, %ld, %ld, %ld )",
+                  ARG1, ARG2, ARG3, ARG4);
+            PRE_REG_READ4(long, SC3("semsys", "semctl", "getzcnt"),
+                          int, opcode, int, semid, int, semnum, int, cmd);
+            break;
+         default:
+            VG_(unimplemented)("Syswrap of the semsys(semctl) call "
+                               "with cmd %ld.", ARG4);
+            /*NOTREACHED*/
+            break;
+      }
+      ML_(generic_PRE_sys_semctl)(tid, ARG2, ARG3, ARG4, ARG5);
+      break;
+   case VKI_SEMGET:
+      /* Libc: int semget(key_t key, int nsems, int semflg); */
+      PRINT("sys_semsys ( %ld, %ld, %ld, %ld )", ARG1, ARG2, ARG3, ARG4);
+      PRE_REG_READ4(long, SC2("semsys", "semget"), int, opcode,
+                    vki_key_t, key, int, nsems, int, semflg);
+      break;
+   case VKI_SEMOP:
+      /* Libc: int semop(int semid, struct sembuf *sops, size_t nsops); */
+      PRINT("sys_semsys ( %ld, %ld, %#lx, %lu )", ARG1, ARG2, ARG3, ARG4);
+      PRE_REG_READ4(long, SC2("semsys", "semop"), int, opcode, int, semid,
+                    struct vki_sembuf *, sops, vki_size_t, nsops);
+      ML_(generic_PRE_sys_semop)(tid, ARG2, ARG3, ARG4);
+      break;
+   case VKI_SEMIDS:
+      /* Libc: int semids(int *buf, uint_t nids, uint_t *pnids); */
+      PRINT("sys_semsys ( %ld, %#lx, %ld, %#lx )", ARG1, ARG2, ARG3, ARG4);
+      PRE_REG_READ4(long, SC2("semsys", "semids"), int, opcode, int *, buf,
+                   vki_uint_t, nids, vki_uint_t *, pnids);
+
+      PRE_MEM_WRITE("semsys(semids, buf)", ARG2, ARG3 * sizeof(int *));
+      PRE_MEM_WRITE("semsys(semids, pnids)", ARG4, sizeof(vki_uint_t));
+      break;
+   case VKI_SEMTIMEDOP:
+      /* Libc: int semtimedop(int semid, struct sembuf *sops, size_t nsops,
+                              const struct timespec *timeout);
+       */
+      PRINT("sys_semsys ( %ld, %ld, %#lx, %lu, %#lx )", ARG1, ARG2, ARG3,
+            ARG4, ARG5);
+      PRE_REG_READ5(long, SC2("semsys", "semtimedop"), int, opcode,
+                    int, semid, struct vki_sembuf *, sops, vki_size_t, nsops,
+                    struct vki_timespec *, timeout);
+      ML_(generic_PRE_sys_semtimedop)(tid, ARG2, ARG3, ARG4, ARG5);
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the semsys call with opcode %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_semsys)
+{
+   switch (ARG1 /*opcode*/) {
+   case VKI_SEMCTL:
+      ML_(generic_POST_sys_semctl)(tid, RES, ARG2, ARG3, ARG4, ARG5);
+      break;
+   case VKI_SEMGET:
+   case VKI_SEMOP:
+      break;
+   case VKI_SEMIDS:
+      {
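+         /* A note on the logic below: the syscall succeeded, so the kernel has
+            written the total number of ids to *pnids; mark buf only when that
+            number fits into the caller-supplied buffer (nids entries). */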
+         POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
+
+         uint_t *pnids = (uint_t *)ARG4;
+         if (*pnids <= ARG3)
+            POST_MEM_WRITE(ARG2, *pnids * sizeof(int *));
+      }
+      break;
+   case VKI_SEMTIMEDOP:
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+/* ---------------------------------------------------------------------
+   ioctl wrappers
+   ------------------------------------------------------------------ */
+
+PRE(sys_ioctl)
+{
+   /* int ioctl(int fildes, int request, ...); */
+   *flags |= SfMayBlock;
+
+   /* Prevent sign-extending the switch case values to 64 bits on 64-bit
+      architectures. */
+   Int cmd = (Int) ARG2;
+
+   switch (cmd /*request*/) {
+      /* Handle 2-arg specially here (they do not use ARG3 at all). */
+   case VKI_TIOCNOTTY:
+   case VKI_TIOCSCTTY:
+      PRINT("sys_ioctl ( %ld, %#lx )", ARG1, ARG2);
+      PRE_REG_READ2(long, "ioctl", int, fd, int, request);
+      break;
+      /* And now come the 3-arg ones. */
+   default:
+      PRINT("sys_ioctl ( %ld, %#lx, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, "ioctl", int, fd, int, request, intptr_t, arg);
+      break;
+   }
+
+   switch (cmd /*request*/) {
+   /* pools */
+   case VKI_POOL_STATUSQ:
+      PRE_MEM_WRITE("ioctl(POOL_STATUSQ)", ARG3, sizeof(vki_pool_status_t));
+      break;
+
+   /* mntio */
+   case VKI_MNTIOC_GETMNTANY:
+      {
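+         /* Assumed layout of the request: the caller supplies a template
+            mnttab via embuf->mbuf_emp whose non-NULL string fields the kernel
+            reads as match criteria, and a result buffer via embuf->mbuf_buf
+            which the kernel fills in.  Mark the pieces accordingly. */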
+         PRE_MEM_READ("ioctl(MNTIOC_GETMNTANY)",
+                      ARG3, sizeof(struct vki_mntentbuf));
+
+         struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
+         if (ML_(safe_to_deref(embuf, sizeof(*embuf)))) {
+            PRE_MEM_READ("ioctl(MNTIOC_GETMNTANY, embuf->mbuf_emp)",
+                         (Addr) embuf->mbuf_emp,
+                         sizeof(struct vki_mnttab));
+            PRE_MEM_WRITE("ioctl(MNTIOC_GETMNTANY, embuf->mbuf_buf)",
+                          (Addr) embuf->mbuf_buf,
+                          embuf->mbuf_bufsize);
+            struct vki_mnttab *mnt
+               = (struct vki_mnttab *) embuf->mbuf_emp;
+            if (ML_(safe_to_deref(mnt, sizeof(struct vki_mnttab)))) {
+               if (mnt->mnt_special != NULL)
+                  PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_special)",
+                                  (Addr) mnt->mnt_special);
+               if (mnt->mnt_mountp != NULL)
+                  PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_mountp)",
+                                  (Addr) mnt->mnt_mountp);
+               if (mnt->mnt_fstype != NULL)
+                  PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_fstype)",
+                                  (Addr) mnt->mnt_fstype);
+               if (mnt->mnt_mntopts != NULL)
+                  PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_mntopts)",
+                                  (Addr) mnt->mnt_mntopts);
+               if (mnt->mnt_time != NULL)
+                  PRE_MEM_RASCIIZ("ioctl(MNTIOC_GETMNTANY, mnt->mnt_time)",
+                                  (Addr) mnt->mnt_time);
+            }
+         }
+      }
+      break;
+
+   /* termio/termios */
+   case VKI_TCGETA:
+      PRE_MEM_WRITE("ioctl(TCGETA)", ARG3, sizeof(struct vki_termio));
+      break;
+   case VKI_TCGETS:
+      PRE_MEM_WRITE("ioctl(TCGETS)", ARG3, sizeof(struct vki_termios));
+      break;
+   case VKI_TCSETS:
+      PRE_MEM_READ("ioctl(TCSETS)", ARG3, sizeof(struct vki_termios));
+      break;
+   case VKI_TCSETSW:
+      PRE_MEM_READ("ioctl(TCSETSW)", ARG3, sizeof(struct vki_termios));
+      break;
+   case VKI_TCSETSF:
+      PRE_MEM_READ("ioctl(TCSETSF)", ARG3, sizeof(struct vki_termios));
+      break;
+   case VKI_TIOCGWINSZ:
+      PRE_MEM_WRITE("ioctl(TIOCGWINSZ)", ARG3, sizeof(struct vki_winsize));
+      break;
+   case VKI_TIOCSWINSZ:
+      PRE_MEM_READ("ioctl(TIOCSWINSZ)", ARG3, sizeof(struct vki_winsize));
+      break;
+   case VKI_TIOCGPGRP:
+      PRE_MEM_WRITE("ioctl(TIOCGPGRP)", ARG3, sizeof(vki_pid_t));
+      break;
+   case VKI_TIOCSPGRP:
+      PRE_MEM_READ("ioctl(TIOCSPGRP)", ARG3, sizeof(vki_pid_t));
+      break;
+   case VKI_TIOCGSID:
+      PRE_MEM_WRITE("ioctl(TIOCGSID)", ARG3, sizeof(vki_pid_t));
+      break;
+   case VKI_TIOCNOTTY:
+   case VKI_TIOCSCTTY:
+      break;
+
+   /* STREAMS */
+   case VKI_I_PUSH:
+      PRE_MEM_RASCIIZ("ioctl(I_PUSH)", ARG3);
+      break;
+   case VKI_I_STR:
+      {
+         PRE_MEM_READ("ioctl(I_STR)", ARG3, sizeof(struct vki_strioctl));
+
+         struct vki_strioctl *p = (struct vki_strioctl *) ARG3;
+         if (ML_(safe_to_deref(p, sizeof(*p)))) {
+            if ((p->ic_dp != NULL) && (p->ic_len > 0)) {
+               PRE_MEM_READ("ioctl(I_STR, strioctl->ic_dp)",
+                            (Addr) p->ic_dp, p->ic_len);
+            }
+         }
+      }
+      break;
+   case VKI_I_PEEK:
+      {
+         /* Try hard not to mark strpeek->*buf.len members as being read. */
+         struct vki_strpeek *p = (struct vki_strpeek*)ARG3;
+
+         PRE_FIELD_READ("ioctl(I_PEEK, strpeek->ctlbuf.maxlen)",
+                        p->ctlbuf.maxlen);
+         PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->ctlbuf.len)",
+                         p->ctlbuf.len);
+         PRE_FIELD_READ("ioctl(I_PEEK, strpeek->ctlbuf.buf)",
+                        p->ctlbuf.buf);
+         PRE_FIELD_READ("ioctl(I_PEEK, strpeek->databuf.maxlen)",
+                        p->databuf.maxlen);
+         PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->databuf.len)",
+                         p->databuf.len);
+         PRE_FIELD_READ("ioctl(I_PEEK, strpeek->databuf.buf)",
+                        p->databuf.buf);
+         PRE_FIELD_READ("ioctl(I_PEEK, strpeek->flags)", p->flags);
+         /*PRE_FIELD_WRITE("ioctl(I_PEEK, strpeek->flags)", p->flags);*/
+
+         if (ML_(safe_to_deref(p, sizeof(*p)))) {
+            if (p->ctlbuf.buf && p->ctlbuf.maxlen > 0)
+               PRE_MEM_WRITE("ioctl(I_PEEK, strpeek->ctlbuf.buf)",
+                             (Addr)p->ctlbuf.buf, p->ctlbuf.maxlen);
+            if (p->databuf.buf && p->databuf.maxlen > 0)
+               PRE_MEM_WRITE("ioctl(I_PEEK, strpeek->databuf.buf)",
+                             (Addr)p->databuf.buf, p->databuf.maxlen);
+         }
+      }
+      break;
+   case VKI_I_CANPUT:
+      break;
+
+   /* sockio */
+   case VKI_SIOCGLIFNUM:
+      {
+         struct vki_lifnum *p = (struct vki_lifnum *) ARG3;
+         PRE_FIELD_READ("ioctl(SIOCGLIFNUM, lifn->lifn_family)",
+                        p->lifn_family);
+         PRE_FIELD_READ("ioctl(SIOCGLIFNUM, lifn->lifn_flags)",
+                        p->lifn_flags);
+         PRE_FIELD_WRITE("ioctl(SIOCGLIFNUM, lifn->lifn_count)",
+                         p->lifn_count);
+      }
+      break;
+
+   /* filio */
+   case VKI_FIOSETOWN:
+      PRE_MEM_READ("ioctl(FIOSETOWN)", ARG3, sizeof(vki_pid_t));
+      break;
+   case VKI_FIOGETOWN:
+      PRE_MEM_WRITE("ioctl(FIOGETOWN)", ARG3, sizeof(vki_pid_t));
+      break;
+
+   /* CRYPTO */
+   case VKI_CRYPTO_GET_PROVIDER_LIST:
+      {
+         vki_crypto_get_provider_list_t *pl =
+            (vki_crypto_get_provider_list_t *) ARG3;
+         PRE_FIELD_READ("ioctl(CRYPTO_GET_PROVIDER_LIST, pl->pl_count)",
+                        pl->pl_count);
+
+         if (ML_(safe_to_deref)(pl, sizeof(*pl))) {
+            PRE_MEM_WRITE("ioctl(CRYPTO_GET_PROVIDER_LIST)", ARG3,
+                          MAX(1, pl->pl_count) *
+                          sizeof(vki_crypto_get_provider_list_t));
+         }
+         /* ARG3 is stashed in the otherwise unused ARG4 below, once we know
+            the pre-handler succeeded; the POST handler tests it. */
+      }
+      break;
+
+   /* dtrace */
+   case VKI_DTRACEHIOC_REMOVE:
+      break;
+   case VKI_DTRACEHIOC_ADDDOF:
+      {
+         vki_dof_helper_t *dh = (vki_dof_helper_t *) ARG3;
+         PRE_MEM_RASCIIZ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_mod)",
+                         (Addr) dh->dofhp_mod);
+         PRE_FIELD_READ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_addr",
+                        dh->dofhp_addr);
+         PRE_FIELD_READ("ioctl(DTRACEHIOC_ADDDOF, dh->dofhp_dof",
+                        dh->dofhp_dof);
+      }
+      break;
+
+   default:
+      ML_(PRE_unknown_ioctl)(tid, ARG2, ARG3);
+      break;
+   }
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "ioctl", tid, False)) {
+      SET_STATUS_Failure(VKI_EBADF);
+   } else if (ARG2 == VKI_CRYPTO_GET_PROVIDER_LIST) {
+      /* Stash ARG3 in the otherwise unused ARG4 now; the POST handler uses it
+         as a flag that this pre-handler completed successfully. */
+      ARG4 = ARG3;
+   }
+}
+
+POST(sys_ioctl)
+{
+   /* Prevent sign-extending the switch case values to 64 bits on 64-bit
+      architectures. */
+   Int cmd = (Int) ARG2;
+
+   switch (cmd /*request*/) {
+   /* pools */
+   case VKI_POOL_STATUSQ:
+      POST_MEM_WRITE(ARG3, sizeof(vki_pool_status_t));
+      break;
+
+   /* mntio */
+   case VKI_MNTIOC_GETMNTANY:
+      {
+         struct vki_mntentbuf *embuf = (struct vki_mntentbuf *) ARG3;
+         struct vki_mnttab *mnt = (struct vki_mnttab *) embuf->mbuf_emp;
+
+         POST_MEM_WRITE((Addr) mnt, sizeof(struct vki_mnttab));
+         if (mnt != NULL) {
+            if (mnt->mnt_special != NULL)
+               POST_MEM_WRITE((Addr) mnt->mnt_special,
+                              VG_(strlen)(mnt->mnt_special) + 1);
+            if (mnt->mnt_mountp != NULL)
+               POST_MEM_WRITE((Addr) mnt->mnt_mountp,
+                              VG_(strlen)(mnt->mnt_mountp) + 1);
+            if (mnt->mnt_fstype != NULL)
+               POST_MEM_WRITE((Addr) mnt->mnt_fstype,
+                              VG_(strlen)(mnt->mnt_fstype) + 1);
+            if (mnt->mnt_mntopts != NULL)
+               POST_MEM_WRITE((Addr) mnt->mnt_mntopts,
+                              VG_(strlen)(mnt->mnt_mntopts) + 1);
+            if (mnt->mnt_time != NULL)
+               POST_MEM_WRITE((Addr) mnt->mnt_time,
+                              VG_(strlen)(mnt->mnt_time) + 1);
+         }
+      }
+      break;
+
+   /* termio/termios */
+   case VKI_TCGETA:
+      POST_MEM_WRITE(ARG3, sizeof(struct vki_termio));
+      break;
+   case VKI_TCGETS:
+      POST_MEM_WRITE(ARG3, sizeof(struct vki_termios));
+      break;
+   case VKI_TCSETS:
+      break;
+   case VKI_TCSETSW:
+      break;
+   case VKI_TCSETSF:
+      break;
+   case VKI_TIOCGWINSZ:
+      POST_MEM_WRITE(ARG3, sizeof(struct vki_winsize));
+      break;
+   case VKI_TIOCSWINSZ:
+      break;
+   case VKI_TIOCGPGRP:
+      POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
+      break;
+   case VKI_TIOCSPGRP:
+      break;
+   case VKI_TIOCGSID:
+      POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
+      break;
+   case VKI_TIOCNOTTY:
+   case VKI_TIOCSCTTY:
+      break;
+
+   /* STREAMS */
+   case VKI_I_PUSH:
+      break;
+   case VKI_I_STR:
+      {
+         struct vki_strioctl *p = (struct vki_strioctl *) ARG3;
+
+         POST_FIELD_WRITE(p->ic_len);
+         if ((p->ic_dp != NULL) && (p->ic_len > 0))
+            POST_MEM_WRITE((Addr) p->ic_dp, p->ic_len);
+      }
+      break;
+   case VKI_I_PEEK:
+      {
+         struct vki_strpeek *p = (struct vki_strpeek*)ARG3;
+
+         POST_FIELD_WRITE(p->ctlbuf.len);
+         POST_FIELD_WRITE(p->databuf.len);
+         POST_FIELD_WRITE(p->flags);
+
+         if (p->ctlbuf.buf && p->ctlbuf.len > 0)
+            POST_MEM_WRITE((Addr)p->ctlbuf.buf, p->ctlbuf.len);
+         if (p->databuf.buf && p->databuf.len > 0)
+            POST_MEM_WRITE((Addr)p->databuf.buf, p->databuf.len);
+      }
+      break;
+   case VKI_I_CANPUT:
+      break;
+
+   /* sockio */
+   case VKI_SIOCGLIFNUM:
+      {
+         struct vki_lifnum *p = (struct vki_lifnum *) ARG3;
+         POST_FIELD_WRITE(p->lifn_count);
+      }
+      break;
+
+   /* filio */
+   case VKI_FIOSETOWN:
+      break;
+   case VKI_FIOGETOWN:
+      POST_MEM_WRITE(ARG3, sizeof(vki_pid_t));
+      break;
+
+   /* CRYPTO */
+   case VKI_CRYPTO_GET_PROVIDER_LIST:
+      {
+         vki_crypto_get_provider_list_t *pl =
+            (vki_crypto_get_provider_list_t *) ARG3;
+
+         POST_FIELD_WRITE(pl->pl_count);
+         POST_FIELD_WRITE(pl->pl_return_value);
+
+         if ((ARG4 > 0) && (pl->pl_return_value == VKI_CRYPTO_SUCCESS))
+            POST_MEM_WRITE((Addr) pl->pl_list, pl->pl_count *
+                           sizeof(vki_crypto_provider_entry_t));
+      }
+      break;
+
+   /* dtrace */
+   case VKI_DTRACEHIOC_REMOVE:
+   case VKI_DTRACEHIOC_ADDDOF:
+      break;
+
+   default:
+      /* Not really anything to do since ioctl direction hints are hardly used
+         on Solaris. */
+      break;
+   }
+}
+
+PRE(sys_fchownat)
+{
+   /* int fchownat(int fd, const char *path, uid_t owner, gid_t group,
+                   int flag); */
+
+   /* Interpret the first argument as a 32-bit value even on a 64-bit
+      architecture. This is different from Linux, for example, where glibc
+      sign-extends it. */
+   Int fd = (Int) ARG1;
+
+   PRINT("sys_fchownat ( %d, %#lx(%s), %ld, %ld, %ld )", fd,
+         ARG2, (HChar *) ARG2, ARG3, ARG4, ARG5);
+   PRE_REG_READ5(long, "fchownat", int, fd, const char *, path,
+                 vki_uid_t, owner, vki_gid_t, group, int, flag);
+
+   if (ARG2)
+      PRE_MEM_RASCIIZ("fchownat(path)", ARG2);
+
+   /* Be strict but ignore fd for absolute path. */
+   if (fd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(fd, "fchownat", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_fdsync)
+{
+   /* int fdsync(int fd, int flag); */
+   PRINT("sys_fdsync ( %ld, %ld )", ARG1, ARG2);
+   PRE_REG_READ2(long, "fdsync", int, fd, int, flag);
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "fdsync", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_execve)
+{
+   Int i, j;
+   /* This is a Solaris-specific version of the generic pre-execve wrapper. */
+
+#if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
+   /* int execve(uintptr_t file, const char **argv, const char **envp,
+                 int flags); */
+   PRINT("sys_execve ( %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "execve", uintptr_t, file, const char **, argv,
+                 const char **, envp, int, flags);
+
+#else
+
+   /* int execve(const char *fname, const char **argv, const char **envp); */
+   PRINT("sys_execve ( %#lx(%s), %#lx, %#lx )",
+         ARG1, (HChar *) ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "execve", const char *, file, const char **, argv,
+                 const char **, envp);
+#endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
+
+   Bool ARG1_is_fd = False;
+#if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
+   if (ARG4 & VKI_EXEC_DESCRIPTOR) {
+      ARG1_is_fd = True;
+   }
+#endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
+
+   if (ARG1_is_fd == False)
+      PRE_MEM_RASCIIZ("execve(filename)", ARG1);
+   if (ARG2)
+      ML_(pre_argv_envp)(ARG2, tid, "execve(argv)", "execve(argv[i])");
+   if (ARG3)
+      ML_(pre_argv_envp)(ARG3, tid, "execve(envp)", "execve(envp[i])");
+
+   /* Erk.  If the exec fails, then the following will have made a mess of
+      things which makes it hard for us to continue.  The right thing to do is
+      piece everything together again in POST(execve), but that's close to
+      impossible.  Instead, we make an effort to check that the execve will
+      work before actually doing it. */
+
+   const HChar *fname = (const HChar *) ARG1;
+   if (ARG1_is_fd) {
+      if (!ML_(fd_allowed)(ARG1, "execve", tid, False)) {
+         SET_STATUS_Failure(VKI_EBADF);
+         return;
+      }
+
+      if (VG_(resolve_filename)(ARG1, &fname) == False) {
+         SET_STATUS_Failure(VKI_EBADF);
+         return;
+      }
+
+      struct vg_stat stats;
+      if (VG_(fstat)(ARG1, &stats) != 0) {
+         SET_STATUS_Failure(VKI_EBADF);
+         return;
+      }
+
+      if (stats.nlink > 1)
+         VG_(unimplemented)("Syswrap of execve where fd points to a hardlink.");
+   }
+
+   /* Check that the name at least begins in client-accessible storage. */
+   if (ARG1_is_fd == False) {
+      if ((fname == NULL) || !ML_(safe_to_deref)(fname, 1)) {
+         SET_STATUS_Failure(VKI_EFAULT);
+         return;
+      }
+   }
+
+   /* Check that the args at least begin in client-accessible storage.
+      Solaris does not allow an exec without any arguments specified. */
+   if (!ARG2 /* obviously bogus */ ||
+       !VG_(am_is_valid_for_client)(ARG2, 1, VKI_PROT_READ)) {
+      SET_STATUS_Failure(VKI_EFAULT);
+      return;
+   }
+
+   /* Debug-only printing. */
+   if (0) {
+      VG_(printf)("ARG1 = %#lx(%s)\n", ARG1, fname);
+      if (ARG2) {
+         Int q;
+         HChar** vec = (HChar**)ARG2;
+
+         VG_(printf)("ARG2 = ");
+         for (q = 0; vec[q]; q++)
+            VG_(printf)("%p(%s) ", vec[q], vec[q]);
+         VG_(printf)("\n");
+      }
+      else
+         VG_(printf)("ARG2 = null\n");
+   }
+
+   /* Decide whether or not we want to follow along. */
+   /* Make 'child_argv' be a pointer to the child's arg vector (skipping the
+      exe name) */
+   const HChar **child_argv = (const HChar **) ARG2;
+   if (child_argv[0] == NULL)
+      child_argv = NULL;
+   Bool trace_this_child = VG_(should_we_trace_this_child)(fname, child_argv);
+
+   /* Do the important checks:  it is a file, is executable, permissions are
+      ok, etc.  We allow setuid executables to run only when we are not
+      simulating them, that is, when they are to be run natively. */
+   Bool setuid_allowed = trace_this_child ? False : True;
+   SysRes res = VG_(pre_exec_check)(fname, NULL, setuid_allowed);
+   if (sr_isError(res)) {
+      SET_STATUS_Failure(sr_Err(res));
+      return;
+   }
+
+   /* If we're tracing the child, and the launcher name looks bogus (possibly
+      because launcher.c couldn't figure it out, see comments therein) then we
+      have no option but to fail. */
+   if (trace_this_child &&
+       (!VG_(name_of_launcher) || VG_(name_of_launcher)[0] != '/')) {
+      SET_STATUS_Failure(VKI_ECHILD); /* "No child processes." */
+      return;
+   }
+
+   /* After this point, we can't recover if the execve fails. */
+   VG_(debugLog)(1, "syswrap", "Exec of %s\n", fname);
+
+   /* Terminate gdbserver if it is active. */
+   if (VG_(clo_vgdb) != Vg_VgdbNo) {
+      /* If the child will not be traced, we need to terminate gdbserver to
+         clean up the gdbserver resources (e.g. the FIFO files). If the child
+         will be traced, we also terminate gdbserver: the new Valgrind will
+         start a fresh gdbserver after exec. */
+      VG_(gdbserver)(0);
+   }
+
+   /* Resistance is futile.  Nuke all other threads.  POSIX mandates this.
+      (Really, nuke them all, since the new process will make its own new
+      thread.) */
+   VG_(nuke_all_threads_except)(tid, VgSrc_ExitThread);
+   VG_(reap_threads)(tid);
+
+   /* Set up the child's exe path. */
+   const HChar *path = fname;
+   const HChar *launcher_basename = NULL;
+   if (trace_this_child) {
+      /* We want to exec the launcher.  Get its pre-remembered path. */
+      path = VG_(name_of_launcher);
+      /* VG_(name_of_launcher) should have been acquired by m_main at
+         startup. */
+      vg_assert(path);
+
+      launcher_basename = VG_(strrchr)(path, '/');
+      if (!launcher_basename || launcher_basename[1] == '\0')
+         launcher_basename = path;  /* hmm, tres dubious */
+      else
+         launcher_basename++;
+   }
+
+   /* Set up the child's environment.
+
+      Remove the valgrind-specific stuff from the environment so the child
+      doesn't get vgpreload_core.so, vgpreload_<tool>.so, etc.  This is done
+      unconditionally, since if we are tracing the child, the child valgrind
+      will set up the appropriate client environment.  Nb: we make a copy of
+      the environment before trying to mangle it as it might be in read-only
+      memory (bug #101881).
+
+      Then, if tracing the child, set VALGRIND_LIB for it. */
+   HChar **envp = NULL;
+   if (ARG3 != 0) {
+      envp = VG_(env_clone)((HChar**)ARG3);
+      vg_assert(envp != NULL);
+      VG_(env_remove_valgrind_env_stuff)(envp, True /*ro_strings*/, NULL);
+   }
+
+   if (trace_this_child) {
+      /* Set VALGRIND_LIB in ARG3 (the environment). */
+      VG_(env_setenv)( &envp, VALGRIND_LIB, VG_(libdir));
+   }
+
+   /* Set up the child's args.  If not tracing it, they are simply ARG2.
+      Otherwise, they are:
+
+      [launcher_basename] ++ VG_(args_for_valgrind) ++ [ARG1] ++ ARG2[1..],
+
+      except that the first VG_(args_for_valgrind_noexecpass) args are
+      omitted. */
+   HChar **argv = NULL;
+   if (!trace_this_child)
+      argv = (HChar **) ARG2;
+   else {
+      Int tot_args;
+
+      vg_assert(VG_(args_for_valgrind));
+      vg_assert(VG_(args_for_valgrind_noexecpass) >= 0);
+      vg_assert(VG_(args_for_valgrind_noexecpass)
+                   <= VG_(sizeXA)(VG_(args_for_valgrind)));
+
+      /* How many args in total will there be? */
+      /* launcher basename */
+      tot_args = 1;
+      /* V's args */
+      tot_args += VG_(sizeXA)(VG_(args_for_valgrind));
+      tot_args -= VG_(args_for_valgrind_noexecpass);
+      /* name of client exe */
+      tot_args++;
+      /* args for client exe, skipping [0] */
+      HChar **arg2copy = (HChar **) ARG2;
+      if (arg2copy[0] != NULL)
+         for (i = 1; arg2copy[i]; i++)
+            tot_args++;
+      /* allocate */
+      argv = VG_(malloc)("syswrap.exec.5", (tot_args + 1) * sizeof(HChar*));
+      /* copy */
+      j = 0;
+      argv[j++] = CONST_CAST(HChar *, launcher_basename);
+      for (i = 0; i < VG_(sizeXA)(VG_(args_for_valgrind)); i++) {
+         if (i < VG_(args_for_valgrind_noexecpass))
+            continue;
+         argv[j++] = *(HChar**)VG_(indexXA)(VG_(args_for_valgrind), i);
+      }
+      argv[j++] = CONST_CAST(HChar *, fname);
+      if (arg2copy[0] != NULL)
+         for (i = 1; arg2copy[i]; i++)
+            argv[j++] = arg2copy[i];
+      argv[j++] = NULL;
+      /* check */
+      vg_assert(j == tot_args + 1);
+   }
+
+   /* Set the signal state up for exec.
+
+      We need to set the real signal state to make sure the exec'd process
+      gets SIG_IGN properly.
+
+      Also set our real sigmask to match the client's sigmask so that the
+      exec'd child will get the right mask.  First we need to clear out any
+      pending signals so they don't get delivered, which would confuse
+      things.
+
+      XXX This is a bug - the signals should remain pending, and be delivered
+      to the new process after exec.  There's also a race-condition, since if
+      someone delivers us a signal between the sigprocmask and the execve,
+      we'll still get the signal. Oh well.
+   */
+   {
+      vki_sigset_t allsigs;
+      vki_siginfo_t info;
+
+      /* What this loop does: it queries SCSS (the signal state that the
+         client _thinks_ the kernel is in) by calling VG_(do_sys_sigaction),
+         and modifies the real kernel signal state accordingly. */
+      for (i = 1; i < VG_(max_signal); i++) {
+         vki_sigaction_fromK_t sa_f;
+         vki_sigaction_toK_t   sa_t;
+         VG_(do_sys_sigaction)(i, NULL, &sa_f);
+         VG_(convert_sigaction_fromK_to_toK)(&sa_f, &sa_t);
+         VG_(sigaction)(i, &sa_t, NULL);
+      }
+
+      VG_(sigfillset)(&allsigs);
+      while (VG_(sigtimedwait_zero)(&allsigs, &info) > 0)
+         ;
+
+      ThreadState *tst = VG_(get_ThreadState)(tid);
+      VG_(sigprocmask)(VKI_SIG_SETMASK, &tst->sig_mask, NULL);
+   }
+
+   /* Restore the DATA rlimit for the child. */
+   VG_(setrlimit)(VKI_RLIMIT_DATA, &VG_(client_rlimit_data));
+
+   /* Debug-only printing. */
+   if (0) {
+      HChar **cpp;
+      VG_(printf)("exec: %s\n", path);
+      for (cpp = argv; cpp && *cpp; cpp++)
+         VG_(printf)("argv: %s\n", *cpp);
+      if (0)
+         for (cpp = envp; cpp && *cpp; cpp++)
+            VG_(printf)("env: %s\n", *cpp);
+   }
+
+#if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
+   res = VG_(do_syscall4)(__NR_execve, (UWord) path, (UWord) argv,
+                          (UWord) envp, ARG4 & ~VKI_EXEC_DESCRIPTOR);
+#else
+   res = VG_(do_syscall3)(__NR_execve, (UWord) path, (UWord) argv,
+                          (UWord) envp);
+#endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
+   SET_STATUS_from_SysRes(res);
+
+   /* If we got here, then the execve failed.  We've already made way too much
+      of a mess to continue, so we have to abort. */
+   vg_assert(FAILURE);
+#if defined(SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS)
+   if (ARG1_is_fd)
+      VG_(message)(Vg_UserMsg, "execve(%ld, %#lx, %#lx, %ld) failed, "
+                   "errno %ld\n", ARG1, ARG2, ARG3, ARG4, ERR);
+   else
+      VG_(message)(Vg_UserMsg, "execve(%#lx(%s), %#lx, %#lx, %ld) failed, errno"
+                   " %ld\n", ARG1, (HChar *) ARG1, ARG2, ARG3, ARG4, ERR);
+#else
+   VG_(message)(Vg_UserMsg, "execve(%#lx(%s), %#lx, %#lx) failed, errno %ld\n",
+                ARG1, (HChar *) ARG1, ARG2, ARG3, ERR);
+#endif /* SOLARIS_EXECVE_SYSCALL_TAKES_FLAGS */
+   VG_(message)(Vg_UserMsg, "EXEC FAILED: I can't recover from "
+                            "execve() failing, so I'm dying.\n");
+   VG_(message)(Vg_UserMsg, "Add more stringent tests in PRE(sys_execve), "
+                            "or work out how to recover.\n");
+   VG_(exit)(101);
+   /*NOTREACHED*/
+}
+
+static void pre_mem_read_flock(ThreadId tid, struct vki_flock *lock)
+{
+   PRE_FIELD_READ("fcntl(lock->l_type)", lock->l_type);
+   PRE_FIELD_READ("fcntl(lock->l_whence)", lock->l_whence);
+   PRE_FIELD_READ("fcntl(lock->l_start)", lock->l_start);
+   PRE_FIELD_READ("fcntl(lock->l_len)", lock->l_len);
+}
+
+#if defined(VGP_x86_solaris)
+static void pre_mem_read_flock64(ThreadId tid, struct vki_flock64 *lock)
+{
+   PRE_FIELD_READ("fcntl(lock->l_type)", lock->l_type);
+   PRE_FIELD_READ("fcntl(lock->l_whence)", lock->l_whence);
+   PRE_FIELD_READ("fcntl(lock->l_start)", lock->l_start);
+   PRE_FIELD_READ("fcntl(lock->l_len)", lock->l_len);
+}
+#endif /* VGP_x86_solaris */
+
+PRE(sys_fcntl)
+{
+   /* int fcntl(int fildes, int cmd, ...); */
+
+   switch (ARG2 /*cmd*/) {
+   /* These ones ignore ARG3. */
+   case VKI_F_GETFD:
+   case VKI_F_GETFL:
+   case VKI_F_GETXFL:
+      PRINT("sys_fcntl ( %ld, %ld )", ARG1, ARG2);
+      PRE_REG_READ2(long, "fcntl", int, fildes, int, cmd);
+      break;
+
+   /* These ones use ARG3 as "arg". */
+   case VKI_F_DUPFD:
+   case VKI_F_SETFD:
+   case VKI_F_SETFL:
+   case VKI_F_DUP2FD:
+   case VKI_F_BADFD:
+      PRINT("sys_fcntl ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd, int, arg);
+      /* Check that the client program isn't going to poison any of V's output
+         fds. */
+      if (ARG2 == VKI_F_DUP2FD &&
+          !ML_(fd_allowed)(ARG3, "fcntl(F_DUP2FD)", tid, False)) {
+         SET_STATUS_Failure(VKI_EBADF);
+         return;
+      }
+      break;
+
+   /* These ones use ARG3 as "native lock" (input only). */
+   case VKI_F_SETLK:
+   case VKI_F_SETLKW:
+   case VKI_F_ALLOCSP:
+   case VKI_F_FREESP:
+   case VKI_F_SETLK_NBMAND:
+      PRINT("sys_fcntl ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
+                    struct flock *, lock);
+      pre_mem_read_flock(tid, (struct vki_flock*)ARG3);
+      break;
+
+   /* This one uses ARG3 as "native lock" (input&output). */
+   case VKI_F_GETLK:
+      PRINT("sys_fcntl ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
+                    struct flock *, lock);
+      pre_mem_read_flock(tid, (struct vki_flock*)ARG3);
+      PRE_MEM_WRITE("fcntl(lock)", ARG3, sizeof(struct vki_flock));
+      break;
+
+#if defined(VGP_x86_solaris)
+   /* These ones use ARG3 as "transitional 64b lock" (input only). */
+   case VKI_F_SETLK64:
+   case VKI_F_SETLKW64:
+   case VKI_F_ALLOCSP64:
+   case VKI_F_FREESP64:
+   case VKI_F_SETLK64_NBMAND:
+      PRINT("sys_fcntl ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
+                    struct flock64 *, lock);
+      pre_mem_read_flock64(tid, (struct vki_flock64*)ARG3);
+      break;
+
+   /* This one uses ARG3 as "transitional 64b lock" (input&output). */
+   case VKI_F_GETLK64:
+      PRINT("sys_fcntl ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
+                    struct flock64 *, lock);
+      pre_mem_read_flock64(tid, (struct vki_flock64*)ARG3);
+      PRE_MEM_WRITE("fcntl(lock)", ARG3, sizeof(struct vki_flock64));
+      break;
+#endif /* VGP_x86_solaris */
+
+   /* These ones use ARG3 as "fshare". */
+   case VKI_F_SHARE:
+   case VKI_F_UNSHARE:
+   case VKI_F_SHARE_NBMAND:
+      PRINT("sys_fcntl[ARG3=='fshare'] ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, "fcntl", int, fildes, int, cmd,
+                    struct fshare *, sh);
+      PRE_MEM_READ("fcntl(fshare)", ARG3, sizeof(struct vki_fshare));
+      break;
+
+   default:
+      VG_(unimplemented)("Syswrap of the fcntl call with cmd %ld.", ARG2);
+      /*NOTREACHED*/
+      break;
+   }
+
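+   /* F_SETLKW (and its 64-bit variant) can wait for a conflicting lock to be
+      released, so mark the syscall as one that may block. */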
+   if (ARG2 == VKI_F_SETLKW
+#if defined(VGP_x86_solaris)
+       || ARG2 == VKI_F_SETLKW64
+#endif /* VGP_x86_solaris */
+       )
+      *flags |= SfMayBlock;
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "fcntl", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_fcntl)
+{
+   switch (ARG2 /*cmd*/) {
+   case VKI_F_DUPFD:
+      if (!ML_(fd_allowed)(RES, "fcntl(F_DUPFD)", tid, True)) {
+         VG_(close)(RES);
+         SET_STATUS_Failure(VKI_EMFILE);
+      }
+      else if (VG_(clo_track_fds))
+         ML_(record_fd_open_named)(tid, RES);
+      break;
+
+   case VKI_F_DUP2FD:
+      if (!ML_(fd_allowed)(RES, "fcntl(F_DUP2FD)", tid, True)) {
+         VG_(close)(RES);
+         SET_STATUS_Failure(VKI_EMFILE);
+      }
+      else if (VG_(clo_track_fds))
+         ML_(record_fd_open_named)(tid, RES);
+      break;
+
+   /* This one uses ARG3 as "native lock" (input&output). */
+   case VKI_F_GETLK:
+      POST_MEM_WRITE(ARG3, sizeof(struct vki_flock));
+      break;
+
+#if defined(VGP_x86_solaris)
+   /* This one uses ARG3 as "transitional 64b lock" (input&output). */
+   case VKI_F_GETLK64:
+      POST_MEM_WRITE(ARG3, sizeof(struct vki_flock64));
+      break;
+#endif /* VGP_x86_solaris */
+
+   default:
+      break;
+   }
+}
+
+PRE(sys_renameat)
+{
+   /* int renameat(int fromfd, const char *old, int tofd, const char *new); */
+
+   /* Interpret the first and third arguments as 32-bit values even on a
+      64-bit architecture. This is different from Linux, for example, where
+      glibc sign-extends them. */
+   Int fromfd = (Int) ARG1;
+   Int tofd = (Int) ARG3;
+
+   *flags |= SfMayBlock;
+   PRINT("sys_renameat ( %d, %#lx(%s), %d, %#lx(%s) )", fromfd,
+         ARG2, (HChar *) ARG2, tofd, ARG4, (HChar *) ARG4);
+   PRE_REG_READ4(long, "renameat", int, fromfd, const char *, old,
+                 int, tofd, const char *, new);
+
+   PRE_MEM_RASCIIZ("renameat(old)", ARG2);
+   PRE_MEM_RASCIIZ("renameat(new)", ARG4);
+
+   /* Be strict but ignore fromfd/tofd for absolute old/new. */
+   if (fromfd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(fromfd, "renameat", tid, False)) {
+      SET_STATUS_Failure(VKI_EBADF);
+   }
+   if (tofd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG4, 1)
+       && ((HChar *) ARG4)[0] != '/'
+       && !ML_(fd_allowed)(tofd, "renameat", tid, False)) {
+      SET_STATUS_Failure(VKI_EBADF);
+   }
+}
+
+PRE(sys_unlinkat)
+{
+   /* int unlinkat(int dirfd, const char *pathname, int flags); */
+
+   /* Interpret the first argument as a 32-bit value even on a 64-bit
+      architecture. This is different from Linux, for example, where glibc
+      sign-extends it. */
+   Int dfd = (Int) ARG1;
+
+   *flags |= SfMayBlock;
+   PRINT("sys_unlinkat ( %d, %#lx(%s), %ld )", dfd, ARG2, (HChar *) ARG2,
+         ARG3);
+   PRE_REG_READ3(long, "unlinkat", int, dirfd, const char *, pathname,
+                 int, flags);
+   PRE_MEM_RASCIIZ("unlinkat(pathname)", ARG2);
+
+   /* Be strict but ignore dfd for absolute pathname. */
+   if (dfd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(dfd, "unlinkat", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_fstatat)
+{
+   /* int fstatat(int fildes, const char *path, struct stat *buf,
+                    int flag); */
+
+   /* Interpret the first argument as a 32-bit value even on a 64-bit
+      architecture. This is different from Linux, for example, where glibc
+      sign-extends it. */
+   Int fd = (Int) ARG1;
+
+   PRINT("sys_fstatat ( %d, %#lx(%s), %#lx, %ld )", fd, ARG2,
+         (HChar *) ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "fstatat", int, fildes, const char *, path,
+                 struct stat *, buf, int, flag);
+   if (ARG2) {
+      /* Only test ARG2 if it isn't NULL.  The kernel treats the NULL-case as
+         fstat(fildes, buf). */
+      PRE_MEM_RASCIIZ("fstatat(path)", ARG2);
+   }
+   PRE_MEM_WRITE("fstatat(buf)", ARG3, sizeof(struct vki_stat));
+
+   /* Be strict but ignore fildes for absolute path. */
+   if (fd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(fd, "fstatat", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_fstatat)
+{
+   POST_MEM_WRITE(ARG3, sizeof(struct vki_stat));
+}
+
+PRE(sys_openat)
+{
+   /* int openat(int fildes, const char *filename, int flags);
+      int openat(int fildes, const char *filename, int flags, mode_t mode); */
+
+   /* Interpret the first argument as a 32-bit value even on a 64-bit
+      architecture. This is different from Linux, for example, where glibc
+      sign-extends it. */
+   Int fd = (Int) ARG1;
+
+   if (ARG3 & VKI_O_CREAT) {
+      /* 4-arg version */
+      PRINT("sys_openat ( %d, %#lx(%s), %ld, %ld )", fd, ARG2, (HChar *) ARG2,
+            ARG3, ARG4);
+      PRE_REG_READ4(long, "openat", int, fildes, const char *, filename,
+                    int, flags, vki_mode_t, mode);
+   }
+   else {
+      /* 3-arg version */
+      PRINT("sys_openat ( %d, %#lx(%s), %ld )", fd, ARG2, (HChar *) ARG2,
+            ARG3);
+      PRE_REG_READ3(long, "openat", int, fildes, const char *, filename,
+                    int, flags);
+   }
+
+   PRE_MEM_RASCIIZ("openat(filename)", ARG2);
+
+   /* Be strict but ignore fildes for absolute pathname. */
+   if (fd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(fd, "openat", tid, False)) {
+      SET_STATUS_Failure(VKI_EBADF);
+      return;
+   }
+
+   if (ML_(handle_auxv_open)(status, (const HChar*)ARG2, ARG3))
+      return;
+
+   if (handle_psinfo_open(status, True /*use_openat*/, (const HChar*)ARG2,
+                          fd, ARG3, ARG4))
+      return;
+
+   *flags |= SfMayBlock;
+}
+
+POST(sys_openat)
+{
+   if (!ML_(fd_allowed)(RES, "openat", tid, True)) {
+      VG_(close)(RES);
+      SET_STATUS_Failure(VKI_EMFILE);
+   }
+   else if (VG_(clo_track_fds))
+      ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG2);
+}
+
+PRE(sys_tasksys)
+{
+   /* Kernel: long tasksys(int code, projid_t projid, uint_t flags,
+                           void *projidbuf, size_t pbufsz);
+    */
+   switch (ARG1 /*code*/) {
+   case 0:
+      /* Libc: taskid_t settaskid(projid_t project, uint_t flags); */
+      PRINT("sys_tasksys ( %ld, %ld, %lu )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("tasksys", "settaskid"), int, code,
+                    vki_projid_t, projid, vki_uint_t, flags);
+      break;
+   case 1:
+      /* Libc: taskid_t gettaskid(void); */
+      PRINT("sys_tasksys ( %ld )", ARG1);
+      PRE_REG_READ1(long, SC2("tasksys", "gettaskid"), int, code);
+      break;
+   case 2:
+      /* Libc: projid_t getprojid(void); */
+      PRINT("sys_tasksys ( %ld )", ARG1);
+      PRE_REG_READ1(long, SC2("tasksys", "getprojid"), int, code);
+      break;
+   case 3:
+      /* Libproject: size_t projlist(id_t *idbuf, size_t idbufsz); */
+      PRINT("sys_tasksys ( %ld, %#lx, %lu )", ARG1, ARG4, ARG5);
+      PRE_REG_READ3(long, SC2("tasksys", "projlist"), int, code,
+                    vki_id_t *, idbuf, vki_size_t, idbufsz);
+      PRE_MEM_WRITE("tasksys(idbuf)", ARG4, ARG5);
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the tasksys call with code %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_tasksys)
+{
+   switch (ARG1 /*code*/) {
+   case 0:
+   case 1:
+   case 2:
+      break;
+   case 3:
+      if ((ARG4 != 0) && (ARG5 != 0))
+         POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_lwp_park)
+{
+   /* Kernel: int lwp_park(int which, uintptr_t arg1, uintptr_t arg2);
+    */
+   *flags |= SfMayBlock;
+   switch (ARG1 /*which*/) {
+   case 0:
+      /* Libc: int lwp_park(timespec_t *timeout, id_t lwpid); */
+      PRINT("sys_lwp_park ( %ld, %#lx, %ld )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("lwp_park", "lwp_park"), int, which,
+                    timespec_t *, timeout, vki_id_t, lwpid);
+      if (ARG2) {
+         PRE_MEM_READ("lwp_park(timeout)", ARG2, sizeof(vki_timespec_t));
+         /*PRE_MEM_WRITE("lwp_park(timeout)", ARG2,
+                         sizeof(vki_timespec_t));*/
+      }
+      break;
+   case 1:
+      /* Libc: int lwp_unpark(id_t lwpid); */
+      PRINT("sys_lwp_park ( %ld, %ld )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("lwp_park", "lwp_unpark"), int, which,
+                    vki_id_t, lwpid);
+      break;
+   case 2:
+      /* Libc: int lwp_unpark_all(id_t *lwpid, int nids); */
+      PRINT("sys_lwp_park ( %ld, %#lx, %ld )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("lwp_park", "lwp_unpark_all"), int, which,
+                    id_t *, lwpid, int, nids);
+      PRE_MEM_READ("lwp_park(lwpid)", ARG2, ARG3 * sizeof(vki_id_t));
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the lwp_park call with which %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_lwp_park)
+{
+   switch (ARG1 /*which*/) {
+   case 0:
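+      /* The kernel may update the timeout with the remaining time, so mark it
+         as written. */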
+      if (ARG2)
+         POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
+      break;
+   case 1:
+   case 2:
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_sendfilev)
+{
+   /* Kernel: ssize_t sendfilev(int opcode, int fd,
+                                const struct sendfilevec *vec,
+                                int sfvcnt, size_t *xferred);
+    */
+   PRINT("sys_sendfilev ( %ld, %ld, %#lx, %ld, %#lx )",
+         ARG1, ARG2, ARG3, ARG4, ARG5);
+
+   switch (ARG1 /*opcode*/) {
+   case VKI_SENDFILEV:
+      {
+         PRE_REG_READ5(long, "sendfilev", int, opcode, int, fd,
+                       const struct vki_sendfilevec *, vec,
+                       int, sfvcnt, vki_size_t *, xferred);
+
+         PRE_MEM_READ("sendfilev(vec)", ARG3,
+                      ARG4 * sizeof(struct vki_sendfilevec));
+         PRE_MEM_WRITE("sendfilev(xferred)", ARG5, sizeof(vki_size_t));
+
+         struct vki_sendfilevec *vec = (struct vki_sendfilevec *) ARG3;
+         if (ML_(safe_to_deref)(vec, ARG4 *
+                                sizeof(struct vki_sendfilevec))) {
+            UInt i;
+            for (i = 0; i < ARG4; i++) {
+               HChar desc[35];    // large enough
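+               /* With SFV_FD_SELF, sfv_off is interpreted as a pointer to the
+                  data itself in the caller's address space, so mark that range
+                  as read instead of checking a descriptor. */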
+               if (vec[i].sfv_fd == VKI_SFV_FD_SELF) {
+                  VG_(snprintf)(desc, sizeof(desc),
+                                "sendfilev(vec[%u].sfv_off", i);
+                  PRE_MEM_READ(desc, vec[i].sfv_off, vec[i].sfv_len);
+               } else {
+                  VG_(snprintf)(desc, sizeof(desc),
+                                "sendfilev(vec[%u].sfv_fd)", i);
+                  if (!ML_(fd_allowed)(vec[i].sfv_fd, desc, tid, False))
+                     SET_STATUS_Failure(VKI_EBADF);
+               }
+            }
+         }
+      }
+      break;
+   case VKI_SENDFILEV64:
+      {
+         PRE_REG_READ5(long, "sendfilev", int, opcode, int, fd,
+                       const struct vki_sendfilevec64 *, vec,
+                       int, sfvcnt, vki_size_t *, xferred);
+
+         PRE_MEM_READ("sendfilev(vec)", ARG3,
+                      ARG4 * sizeof(struct vki_sendfilevec64));
+         PRE_MEM_WRITE("sendfilev(xferred)", ARG5, sizeof(vki_size_t));
+
+         struct vki_sendfilevec64 *vec64 =
+            (struct vki_sendfilevec64 *) ARG3;
+         if (ML_(safe_to_deref)(vec64, ARG4 *
+                                sizeof(struct vki_sendfilevec64))) {
+            UInt i;
+            for (i = 0; i < ARG4; i++) {
+               HChar desc[35];    // large enough
+               if (vec64[i].sfv_fd == VKI_SFV_FD_SELF) {
+                  VG_(snprintf)(desc, sizeof(desc),
+                                "sendfilev(vec[%u].sfv_off", i);
+                  PRE_MEM_READ(desc, vec64[i].sfv_off, vec64[i].sfv_len);
+               } else {
+                  VG_(snprintf)(desc, sizeof(desc),
+                                "sendfilev(vec[%u].sfv_fd)", i);
+                  if (!ML_(fd_allowed)(vec64[i].sfv_fd, desc,
+                                       tid, False))
+                     SET_STATUS_Failure(VKI_EBADF);
+               }
+            }
+         }
+      }
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the sendfilev call with "
+                         "opcode %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG2, "sendfilev(fd)", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+
+   *flags |= SfMayBlock;
+}
+
+POST(sys_sendfilev)
+{
+   POST_MEM_WRITE(ARG5, sizeof(vki_size_t));
+}
+
+#if defined(SOLARIS_LWP_NAME_SYSCALL)
+PRE(sys_lwp_name)
+{
+   /* int lwp_name(int opcode, id_t lwpid, char *name, size_t len); */
+   PRINT("sys_lwp_name ( %ld, %ld, %#lx, %ld )", ARG1, ARG2, ARG3, ARG4);
+
+   switch (ARG1 /*opcode*/) {
+   case 0:
+      /* lwp_setname */
+      PRE_REG_READ3(long, "lwp_name", int, opcode, vki_id_t, lwpid,
+                    char *, name);
+      PRE_MEM_RASCIIZ("lwp_name(name)", ARG3);
+      break;
+   case 1:
+      /* lwp_getname */
+      PRE_REG_READ4(long, "lwp_name", int, opcode, vki_id_t, lwpid,
+                    char *, name, vki_size_t, len);
+      PRE_MEM_WRITE("lwp_name(name)", ARG3, ARG4);
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the lwp_name call with opcode %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_lwp_name)
+{
+   switch (ARG1 /*opcode*/) {
+   case 0:
+      if (ARG3) { // Paranoia
+         const HChar *new_name = (const HChar *) ARG3;
+         ThreadState *tst = VG_(get_ThreadState)(tid);
+         SizeT new_len = VG_(strlen)(new_name);
+
+         /* Don't bother reusing the memory. This is a rare event. */
+         tst->thread_name = VG_(realloc)("syswrap.lwp_name", tst->thread_name,
+                                         new_len + 1);
+         VG_(strcpy)(tst->thread_name, new_name);
+      }
+      break;
+   case 1:
+      POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+#endif /* SOLARIS_LWP_NAME_SYSCALL */
+
+PRE(sys_privsys)
+{
+   /* Kernel: int privsys(int code, priv_op_t op, priv_ptype_t type,
+                          void *buf, size_t bufsize, int itype);
+    */
+   switch (ARG1 /*code*/) {
+   case VKI_PRIVSYS_SETPPRIV:
+      /* Libc: int setppriv(priv_op_t op, priv_ptype_t type,
+                            const priv_set_t *pset);
+       */
+      PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", ARG1, ARG2, ARG3,
+            ARG4, ARG5);
+      PRE_REG_READ5(long, SC2("privsys", "setppriv"), int, code,
+                    vki_priv_op_t, op, vki_priv_ptype_t, type,
+                    const priv_set_t *, pset, vki_size_t, bufsize);
+      PRE_MEM_READ("privsys(pset)", ARG4, ARG5);
+      break;
+   case VKI_PRIVSYS_GETPPRIV:
+      /* Libc: int getppriv(priv_ptype_t type, priv_set_t *pset);
+               priv_set_t *pset -> void *buf
+       */
+      PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %lu )", ARG1, ARG2, ARG3,
+            ARG4, ARG5);
+      PRE_REG_READ5(long, SC2("privsys", "getppriv"), int, code,
+            vki_priv_op_t, op, vki_priv_ptype_t, type, priv_set_t *, pset,
+            vki_size_t, bufsize);
+      PRE_MEM_WRITE("privsys(pset)", ARG4, ARG5);
+      break;
+   case VKI_PRIVSYS_GETIMPLINFO:
+      /* Libc: int getprivinfo(priv_impl_info_t *buf, size_t bufsize);
+               priv_impl_info_t *buf -> void *buf
+       */
+      PRINT("sys_privsys ( %ld, %ld, %ld, %#lx, %ld )", ARG1, ARG2, ARG3,
+            ARG4, ARG5);
+      PRE_REG_READ5(long, SC2("privsys", "getprivinfo"), int, code,
+            vki_priv_op_t, op, vki_priv_ptype_t, type,
+            priv_impl_info_t *, buf, vki_size_t, bufsize);
+      PRE_MEM_WRITE("privsys(buf)", ARG4, ARG5);
+      break;
+   case VKI_PRIVSYS_SETPFLAGS:
+      /* Libc: int setpflags(uint_t flag, uint_t val);
+               uint_t flag -> priv_op_t op
+               uint_t val -> priv_ptype_t type
+       */
+      PRINT("sys_privsys ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("privsys", "setpflags"), int, code,
+                    vki_uint_t, flag, vki_uint_t, val);
+      break;
+   case VKI_PRIVSYS_GETPFLAGS:
+      /* Libc: uint_t getpflags(uint_t flag);
+               uint_t flag -> priv_op_t op
+       */
+      PRINT("sys_privsys ( %ld, %ld )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("privsys", "setpflags"), int, code,
+                    vki_uint_t, flag);
+      break;
+   case VKI_PRIVSYS_ISSETUGID:
+      /* Libc: int issetugid(void); */
+      PRINT("sys_privsys ( %ld )", ARG1);
+      PRE_REG_READ1(long, SC2("privsys", "issetugid"), int, code);
+      break;
+   case VKI_PRIVSYS_PFEXEC_REG:
+      /* Libc: int register_pfexec(int did);
+               int did -> priv_op_t op
+       */
+      PRINT("sys_privsys ( %ld, %ld )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("privsys", "register_pfexec"), int, code,
+                    int, did);
+      break;
+   case VKI_PRIVSYS_PFEXEC_UNREG:
+      /* Libc: int unregister_pfexec(int did); */
+      PRINT("sys_privsys ( %ld, %ld )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("privsys", "unregister_pfexec"), int, code,
+                    int, did);
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the privsys call with code %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+
+   /* Be strict. */
+   if ((ARG1 == VKI_PRIVSYS_PFEXEC_REG ||
+        ARG1 == VKI_PRIVSYS_PFEXEC_UNREG) &&
+       !ML_(fd_allowed)(ARG2, "privsys", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_privsys)
+{
+   switch (ARG1 /*code*/) {
+   case VKI_PRIVSYS_SETPPRIV:
+      break;
+   case VKI_PRIVSYS_GETPPRIV:
+      POST_MEM_WRITE(ARG4, sizeof(vki_priv_set_t));
+      break;
+   case VKI_PRIVSYS_GETIMPLINFO:
+      /* The kernel copies out data of size min(bufsize, privinfosize).
+         Unfortunately, it does not seem to be possible to easily obtain the
+         privinfosize value.  The code below optimistically marks all ARG5
+         bytes (aka bufsize) as written by the kernel. */
+      POST_MEM_WRITE(ARG4, ARG5);
+      break;
+   case VKI_PRIVSYS_SETPFLAGS:
+   case VKI_PRIVSYS_GETPFLAGS:
+   case VKI_PRIVSYS_ISSETUGID:
+   case VKI_PRIVSYS_PFEXEC_REG:
+   case VKI_PRIVSYS_PFEXEC_UNREG:
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_ucredsys)
+{
+   /* Kernel: int ucredsys(int code, int obj, void *buf); */
+   PRINT("sys_ucredsys ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
+
+   switch (ARG1 /*code*/) {
+   case VKI_UCREDSYS_UCREDGET:
+      /* Libc: ucred_t *ucred_get(pid_t pid); */
+      PRE_REG_READ3(long, SC2("ucredsys", "ucredget"), int, code,
+                    vki_pid_t, pid, vki_ucred_t *, buf);
+      PRE_MEM_WRITE("ucredsys(buf)", ARG3, sizeof(vki_ucred_t));
+      break;
+
+   case VKI_UCREDSYS_GETPEERUCRED:
+      /* Libc: int getpeerucred(int fd, ucred_t **ucred); */
+      PRE_REG_READ3(long, SC2("ucredsys", "getpeerucred"), int, code,
+                    int, fd, vki_ucred_t *, buf);
+      PRE_MEM_WRITE("ucredsys(buf)", ARG3, sizeof(vki_ucred_t));
+
+      /* Be strict. */
+      if (!ML_(fd_allowed)(ARG2, "ucredsys", tid, False))
+         SET_STATUS_Failure(VKI_EBADF);
+      break;
+
+   default:
+      VG_(unimplemented)("Syswrap of the ucredsys call with code %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_ucredsys)
+{
+   switch (ARG1 /*code*/) {
+   case VKI_UCREDSYS_UCREDGET:
+   case VKI_UCREDSYS_GETPEERUCRED:
+      vg_assert(ARG3 != 0);
+      POST_MEM_WRITE(ARG3, ((vki_ucred_t *) ARG3)->uc_size);
+      break;
+
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_getmsg)
+{
+   /* int getmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
+                 int *flagsp); */
+   struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
+   struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
+   *flags |= SfMayBlock;
+   PRINT("sys_getmsg ( %ld, %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "getmsg", int, fildes, struct vki_strbuf *, ctlptr,
+                 struct vki_strbuf *, dataptr, int *, flagsp);
+   if (ctrlptr) {
+      PRE_FIELD_READ("getmsg(ctrlptr->maxlen)", ctrlptr->maxlen);
+      PRE_FIELD_WRITE("getmsg(ctrlptr->len)", ctrlptr->len);
+      PRE_FIELD_READ("getmsg(ctrlptr->buf)", ctrlptr->buf);
+      if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
+          && ctrlptr->maxlen > 0)
+         PRE_MEM_WRITE("getmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
+                       ctrlptr->maxlen);
+   }
+   if (dataptr) {
+      PRE_FIELD_READ("getmsg(dataptr->maxlen)", dataptr->maxlen);
+      PRE_FIELD_WRITE("getmsg(dataptr->len)", dataptr->len);
+      PRE_FIELD_READ("getmsg(dataptr->buf)", dataptr->buf);
+      if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
+          && dataptr->maxlen > 0)
+         PRE_MEM_WRITE("getmsg(dataptr->buf)", (Addr)dataptr->buf,
+                       dataptr->maxlen);
+   }
+   PRE_MEM_READ("getmsg(flagsp)", ARG4, sizeof(int));
+   /*PRE_MEM_WRITE("getmsg(flagsp)", ARG4, sizeof(int));*/
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "getmsg", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_getmsg)
+{
+   struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
+   struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
+
+   if (ctrlptr && ctrlptr->len > 0)
+      POST_MEM_WRITE((Addr)ctrlptr->buf, ctrlptr->len);
+   if (dataptr && dataptr->len > 0)
+      POST_MEM_WRITE((Addr)dataptr->buf, dataptr->len);
+   POST_MEM_WRITE(ARG4, sizeof(int));
+}
+
+PRE(sys_putmsg)
+{
+   /* int putmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
+                 int flags); */
+   struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
+   struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
+   *flags |= SfMayBlock;
+   PRINT("sys_putmsg ( %ld, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "putmsg", int, fildes, struct vki_strbuf *, ctrlptr,
+                 struct vki_strbuf *, dataptr, int, flags);
+   if (ctrlptr) {
+      PRE_FIELD_READ("putmsg(ctrlptr->len)", ctrlptr->len);
+      PRE_FIELD_READ("putmsg(ctrlptr->buf)", ctrlptr->buf);
+      if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
+          && ctrlptr->len > 0)
+         PRE_MEM_READ("putmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
+                      ctrlptr->len);
+   }
+   if (dataptr) {
+      PRE_FIELD_READ("putmsg(dataptr->len)", dataptr->len);
+      PRE_FIELD_READ("putmsg(dataptr->buf)", dataptr->buf);
+      if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
+          && dataptr->len > 0)
+         PRE_MEM_READ("putmsg(dataptr->buf)", (Addr)dataptr->buf,
+                      dataptr->len);
+   }
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "putmsg", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_lstat)
+{
+   /* int lstat(const char *path, struct stat *buf); */
+   /* Note: We could use here the sys_newlstat generic wrapper, but the 'new'
+      in its name is rather confusing in the Solaris context, thus we provide
+      our own wrapper. */
+   PRINT("sys_lstat ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
+   PRE_REG_READ2(long, "lstat", const char *, path, struct stat *, buf);
+
+   PRE_MEM_RASCIIZ("lstat(path)", ARG1);
+   PRE_MEM_WRITE("lstat(buf)", ARG2, sizeof(struct vki_stat));
+}
+
+POST(sys_lstat)
+{
+   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat));
+}
+
+PRE(sys_sigprocmask)
+{
+   /* int sigprocmask(int how, const sigset_t *set, sigset_t *oset); */
+   PRINT("sys_sigprocmask ( %ld, %#lx, %#lx )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "sigprocmask",
+                 int, how, vki_sigset_t *, set, vki_sigset_t *, oset);
+   if (ARG2)
+      PRE_MEM_READ("sigprocmask(set)", ARG2, sizeof(vki_sigset_t));
+   if (ARG3)
+      PRE_MEM_WRITE("sigprocmask(oset)", ARG3, sizeof(vki_sigset_t));
+
+   /* Be safe. */
+   if (ARG2 && !ML_(safe_to_deref((void*)ARG2, sizeof(vki_sigset_t)))) {
+      SET_STATUS_Failure(VKI_EFAULT);
+   }
+   if (ARG3 && !ML_(safe_to_deref((void*)ARG3, sizeof(vki_sigset_t)))) {
+      SET_STATUS_Failure(VKI_EFAULT);
+   }
+
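+   /* The mask change is applied through VG_(do_sys_sigprocmask) so that the
+      core's record of the client's signal mask stays in sync; the status is
+      set from its result and the syscall is not passed on to the kernel. */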
+   if (!FAILURE)
+      SET_STATUS_from_SysRes(
+         VG_(do_sys_sigprocmask)(tid, ARG1 /*how*/, (vki_sigset_t*)ARG2,
+                                 (vki_sigset_t*)ARG3)
+      );
+
+   if (SUCCESS)
+      *flags |= SfPollAfter;
+}
+
+POST(sys_sigprocmask)
+{
+   if (ARG3)
+      POST_MEM_WRITE(ARG3, sizeof(vki_sigset_t));
+}
+
+PRE(sys_sigaction)
+{
+   /* int sigaction(int signal, const struct sigaction *act,
+                    struct sigaction *oact); */
+   PRINT("sys_sigaction ( %ld, %#lx, %#lx )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "sigaction", int, signal,
+                 const struct sigaction *, act, struct sigaction *, oact);
+
+   /* Note that on Solaris, vki_sigaction_toK_t and vki_sigaction_fromK_t are
+      both typedefs of 'struct sigaction'. */
+
+   if (ARG2) {
+      vki_sigaction_toK_t *sa = (vki_sigaction_toK_t*)ARG2;
+      PRE_FIELD_READ("sigaction(act->sa_flags)", sa->sa_flags);
+      PRE_FIELD_READ("sigaction(act->sa_handler)", sa->ksa_handler);
+      PRE_FIELD_READ("sigaction(act->sa_mask)", sa->sa_mask);
+   }
+   if (ARG3)
+      PRE_MEM_WRITE("sigaction(oact)", ARG3, sizeof(vki_sigaction_fromK_t));
+
+   /* Be safe. */
+   if (ARG2 && !ML_(safe_to_deref((void*)ARG2,
+                                  sizeof(vki_sigaction_toK_t)))) {
+      SET_STATUS_Failure(VKI_EFAULT);
+   }
+   if (ARG3 && !ML_(safe_to_deref((void*)ARG3,
+                                   sizeof(vki_sigaction_fromK_t)))) {
+      SET_STATUS_Failure(VKI_EFAULT);
+   }
+
+   if (!FAILURE)
+      SET_STATUS_from_SysRes(
+         VG_(do_sys_sigaction)(ARG1, (const vki_sigaction_toK_t*)ARG2,
+                              (vki_sigaction_fromK_t*)ARG3));
+}
+
+POST(sys_sigaction)
+{
+   if (ARG3)
+      POST_MEM_WRITE(ARG3, sizeof(vki_sigaction_fromK_t));
+}
+
+PRE(sys_sigpending)
+{
+   /* int sigpending(int flag, sigset_t *setp); */
+   PRINT("sys_sigpending ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "sigpending", int, flag, sigset_t *, setp);
+   PRE_MEM_WRITE("sigpending(setp)", ARG2, sizeof(vki_sigset_t));
+}
+
+POST(sys_sigpending)
+{
+   POST_MEM_WRITE(ARG2, sizeof(vki_sigset_t));
+}
+
+PRE(sys_getsetcontext)
+{
+   /* Kernel: int getsetcontext(int flag, void *arg) */
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   PRINT("sys_getsetcontext ( %ld, %#lx )", ARG1, ARG2);
+   switch (ARG1 /*flag*/) {
+   case VKI_GETCONTEXT:
+      /* Libc: int getcontext(ucontext_t *ucp); */
+      PRE_REG_READ2(long, SC2("getsetcontext", "getcontext"), int, flag,
+                    ucontext_t *, ucp);
+      PRE_MEM_WRITE("getsetcontext(ucp)", ARG2, sizeof(vki_ucontext_t));
+
+      if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_ucontext_t)))) {
+         SET_STATUS_Failure(VKI_EFAULT);
+         return;
+      }
+      VG_(save_context)(tid, (vki_ucontext_t*)ARG2, Vg_CoreSysCall);
+      SET_STATUS_Success(0);
+      break;
+   case VKI_SETCONTEXT:
+      /* Libc: int setcontext(const ucontext_t *ucp); */
+      PRE_REG_READ2(long, SC2("getsetcontext", "setcontext"), int, flag,
+                    const ucontext_t *, ucp);
+
+      if (!ARG2) {
+         /* Setting NULL context causes thread exit. */
+         tst->exitreason = VgSrc_ExitThread;
+         tst->os_state.exitcode = 0;
+         SET_STATUS_Success(0);
+         return;
+      }
+
+      if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_ucontext_t)))) {
+         SET_STATUS_Failure(VKI_EFAULT);
+         return;
+      }
+
+      VG_(restore_context)(tid, (vki_ucontext_t*)ARG2,
+                           Vg_CoreSysCall, False/*esp_is_thrptr*/);
+      /* Tell the driver not to update the guest state with the "result". */
+      *flags |= SfNoWriteResult;
+      /* Check to see if any signals arose as a result of this. */
+      *flags |= SfPollAfter;
+
+      /* Check if this is a possible return from a signal handler. */
+      VG_(sigframe_return)(tid, (vki_ucontext_t*)ARG2);
+
+      SET_STATUS_Success(0);
+      break;
+   case VKI_GETUSTACK:
+      /* Libc: int getustack(stack_t **spp); */
+      PRE_REG_READ2(long, SC2("getsetcontext", "getustack"), int, flag,
+                    stack_t **, spp);
+      PRE_MEM_WRITE("getsetcontext(spp)", ARG2, sizeof(vki_stack_t*));
+
+      if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_stack_t*)))) {
+         SET_STATUS_Failure(VKI_EFAULT);
+         return;
+      }
+
+      *(vki_stack_t**)ARG2 = tst->os_state.ustack;
+      POST_MEM_WRITE(ARG2, sizeof(vki_stack_t*));
+      SET_STATUS_Success(0);
+      break;
+   case VKI_SETUSTACK:
+      {
+         /* Libc: int setustack(stack_t *sp); */
+         PRE_REG_READ2(long, SC2("getsetcontext", "setustack"), int, flag,
+                       stack_t *, sp);
+
+         /* The kernel does not necessarily read the stack data immediately,
+            but it can read them later, so it is better to make sure the data
+            are defined. */
+         PRE_MEM_READ("getsetcontext_setustack(sp)", ARG2, sizeof(vki_stack_t));
+
+         if (!ML_(safe_to_deref((void*)ARG2, sizeof(vki_stack_t)))) {
+            SET_STATUS_Failure(VKI_EFAULT);
+            return;
+         }
+
+         vki_stack_t *old_stack = tst->os_state.ustack;
+         tst->os_state.ustack = (vki_stack_t*)ARG2;
+
+         /* The thread is setting the ustack pointer.  It is a good time to get
+            information about its stack. */
+         if (tst->os_state.ustack->ss_flags == 0) {
+            /* If the sanity check of ss_flags passed set the stack. */
+            set_stack(tid, tst->os_state.ustack);
+
+            if ((old_stack == NULL) && (tid > 1)) {
+               /* New thread creation is now completed. Inform the tool. */
+               VG_TRACK(pre_thread_first_insn, tid);
+            }
+         }
+
+         SET_STATUS_Success(0);
+      }
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the context call with flag %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+PRE(sys_fchmodat)
+{
+   /* int fchmodat(int fd, const char *path, mode_t mode, int flag); */
+
+   /* Interpret the first argument as a 32-bit value even on a 64-bit
+      architecture. This is different from Linux, for example, where glibc
+      sign-extends it. */
+   Int fd = (Int) ARG1;
+
+   PRINT("sys_fchmodat ( %d, %#lx(%s), %ld, %ld )",
+         fd, ARG2, (HChar *) ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "fchmodat",
+                 int, fd, const char *, path, vki_mode_t, mode, int, flag);
+
+   if (ARG2)
+      PRE_MEM_RASCIIZ("fchmodat(path)", ARG2);
+
+   /* Be strict but ignore fd for absolute path. */
+   if (fd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(fd, "fchmodat", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_mkdirat)
+{
+   /* int mkdirat(int fd, const char *path, mode_t mode); */
+
+   /* Interpret the first argument as a 32-bit value even on a 64-bit
+      architecture. This is different from Linux, for example, where glibc
+      sign-extends it. */
+   Int fd = (Int) ARG1;
+
+   *flags |= SfMayBlock;
+   PRINT("sys_mkdirat ( %d, %#lx(%s), %ld )", fd, ARG2, (HChar *) ARG2, ARG3);
+   PRE_REG_READ3(long, "mkdirat", int, fd, const char *, path,
+                 vki_mode_t, mode);
+   PRE_MEM_RASCIIZ("mkdirat(path)", ARG2);
+
+   /* Be strict but ignore fd for absolute path. */
+   if (fd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(fd, "mkdirat", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+static void do_statvfs_post(struct vki_statvfs *stats, ThreadId tid)
+{
+   POST_FIELD_WRITE(stats->f_bsize);
+   POST_FIELD_WRITE(stats->f_frsize);
+   POST_FIELD_WRITE(stats->f_blocks);
+   POST_FIELD_WRITE(stats->f_bfree);
+   POST_FIELD_WRITE(stats->f_bavail);
+   POST_FIELD_WRITE(stats->f_files);
+   POST_FIELD_WRITE(stats->f_ffree);
+   POST_FIELD_WRITE(stats->f_favail);
+   POST_FIELD_WRITE(stats->f_fsid);
+   POST_MEM_WRITE((Addr) stats->f_basetype, VG_(strlen)(stats->f_basetype) + 1);
+   POST_FIELD_WRITE(stats->f_flag);
+   POST_FIELD_WRITE(stats->f_namemax);
+   POST_MEM_WRITE((Addr) stats->f_fstr, VG_(strlen)(stats->f_fstr) + 1);
+}
+
+PRE(sys_statvfs)
+{
+   /* int statvfs(const char *path, struct statvfs *buf); */
+   *flags |= SfMayBlock;
+   PRINT("sys_statvfs ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
+   PRE_REG_READ2(long, "statvfs", const char *, path,
+                 struct vki_statvfs *, buf);
+   PRE_MEM_RASCIIZ("statvfs(path)", ARG1);
+   PRE_MEM_WRITE("statvfs(buf)", ARG2, sizeof(struct vki_statvfs));
+}
+
+POST(sys_statvfs)
+{
+   do_statvfs_post((struct vki_statvfs *) ARG2, tid);
+}
+
+PRE(sys_fstatvfs)
+{
+   /* int fstatvfs(int fd, struct statvfs *buf); */
+   *flags |= SfMayBlock;
+   PRINT("sys_fstatvfs ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "fstatvfs", int, fd, struct vki_statvfs *, buf);
+   PRE_MEM_WRITE("fstatvfs(buf)", ARG2, sizeof(struct vki_statvfs));
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "fstatvfs", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_fstatvfs)
+{
+   do_statvfs_post((struct vki_statvfs *) ARG2, tid);
+}
+
+PRE(sys_nfssys)
+{
+   /* int nfssys(enum nfssys_op opcode, void *arg); */
+   *flags |= SfMayBlock;
+   PRINT("sys_nfssys ( %ld, %#lx )", ARG1, ARG2);
+
+   switch (ARG1 /*opcode*/) {
+   case VKI_NFS_REVAUTH:
+      PRE_REG_READ2(long, SC2("nfssys", "nfs_revauth"), int, opcode,
+                    struct vki_nfs_revauth_args *, args);
+      PRE_MEM_READ("nfssys(arg)", ARG2,
+                   sizeof(struct vki_nfs_revauth_args));
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the nfssys call with opcode %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_nfssys)
+{
+   switch (ARG1 /*opcode*/) {
+   case VKI_NFS_REVAUTH:
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_waitid)
+{
+   /* int waitid(idtype_t idtype, id_t id, siginfo_t *infop, int options); */
+   *flags |= SfMayBlock;
+   PRINT("sys_waitid( %ld, %ld, %#lx, %ld )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "waitid", vki_idtype_t, idtype, vki_id_t, id,
+                 siginfo_t *, infop, int, options);
+   PRE_MEM_WRITE("waitid(infop)", ARG3, sizeof(vki_siginfo_t));
+}
+
+POST(sys_waitid)
+{
+   POST_MEM_WRITE(ARG3, sizeof(vki_siginfo_t));
+}
+
+#if defined(SOLARIS_UTIMESYS_SYSCALL)
+PRE(sys_utimesys)
+{
+   /* Kernel: int utimesys(int code, uintptr_t arg1, uintptr_t arg2,
+                           uintptr_t arg3, uintptr_t arg4);
+    */
+
+   switch (ARG1 /*code*/) {
+   case 0:
+      /* Libc: int futimens(int fd, const timespec_t times[2]); */
+      PRINT("sys_utimesys ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, "utimesys", int, code, int, fd,
+                    const vki_timespec_t *, times);
+      if (ARG3)
+         PRE_MEM_READ("utimesys(times)", ARG3, 2 * sizeof(vki_timespec_t));
+
+      /* Be strict. */
+      if (!ML_(fd_allowed)(ARG2, "utimesys", tid, False))
+         SET_STATUS_Failure(VKI_EBADF);
+      break;
+   case 1:
+      {
+         /* Libc: int utimensat(int fd, const char *path,
+                                const timespec_t times[2], int flag);
+          */
+
+         /* Interpret the second argument as a 32-bit value even on a 64-bit
+            architecture. This is different from Linux, for example, where
+            glibc sign-extends it. */
+         Int fd = (Int) ARG2;
+
+         PRINT("sys_utimesys ( %ld, %d, %#lx(%s), %#lx, %ld )",
+               ARG1, fd, ARG3, (HChar *) ARG3, ARG4, ARG5);
+         PRE_REG_READ5(long, "utimesys", int, code, int, fd, const char *, path,
+                       const vki_timespec_t *, times, int, flag);
+         if (ARG3)
+            PRE_MEM_RASCIIZ("utimesys(path)", ARG3);
+         if (ARG4)
+            PRE_MEM_READ("utimesys(times)", ARG4, 2 * sizeof(vki_timespec_t));
+
+         /* Be strict but ignore fd for absolute path. */
+         if (fd != VKI_AT_FDCWD
+             && ML_(safe_to_deref)((void *) ARG3, 1)
+             && ((HChar *) ARG3)[0] != '/'
+             && !ML_(fd_allowed)(fd, "utimesys", tid, False))
+            SET_STATUS_Failure(VKI_EBADF);
+         break;
+      }
+   default:
+      VG_(unimplemented)("Syswrap of the utimesys call with code %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+#endif /* SOLARIS_UTIMESYS_SYSCALL */
+
+#if defined(SOLARIS_UTIMENSAT_SYSCALL)
+PRE(sys_utimensat)
+{
+   /* int utimensat(int fd, const char *path, const timespec_t times[2],
+                    int flag);
+    */
+
+   /* Interpret the first argument as a 32-bit value even on a 64-bit
+      architecture. This is different from Linux, for example, where glibc
+      sign-extends it. */
+   Int fd = (Int) ARG1;
+
+   PRINT("sys_utimensat ( %d, %#lx(%s), %#lx, %ld )",
+         fd, ARG2, (HChar *) ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "utimensat", int, fd, const char *, path,
+                 const vki_timespec_t *, times, int, flag);
+   if (ARG2)
+      PRE_MEM_RASCIIZ("utimensat(path)", ARG2);
+   if (ARG3)
+      PRE_MEM_READ("utimensat(times)", ARG3, 2 * sizeof(vki_timespec_t));
+
+   /* Be strict but ignore fd for absolute path. */
+   if (fd != VKI_AT_FDCWD
+       && ML_(safe_to_deref)((void *) ARG2, 1)
+       && ((HChar *) ARG2)[0] != '/'
+       && !ML_(fd_allowed)(fd, "utimensat", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+#endif /* SOLARIS_UTIMENSAT_SYSCALL */
+
+PRE(sys_sigresend)
+{
+   /* int sigresend(int signal, siginfo_t *siginfo, sigset_t *mask); */
+   /* Sends a signal to the calling thread, the mask parameter specifies a new
+      signal mask. */
+
+   /* Static (const) mask that stays accessible after this function returns,
+      because the kernel reads it only when the syscall is performed. */
+   static vki_sigset_t block_all;
+
+   PRINT("sys_sigresend( %ld, %#lx, %#lx )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "sigresend", int, signal, vki_siginfo_t *, siginfo,
+                 vki_sigset_t *, mask);
+
+   if (ARG2)
+      PRE_MEM_READ("sigresend(siginfo)", ARG2, sizeof(vki_siginfo_t));
+   PRE_MEM_WRITE("sigresend(mask)", ARG3, sizeof(vki_sigset_t));
+
+   /* Check the signal and mask. */
+   if (!ML_(client_signal_OK)(ARG1)) {
+      SET_STATUS_Failure(VKI_EINVAL);
+   }
+   if (!ML_(safe_to_deref)((void*)ARG3, sizeof(vki_sigset_t))) {
+      SET_STATUS_Failure(VKI_EFAULT);
+   }
+
+   /* Exit early if there are problems. */
+   if (FAILURE)
+      return;
+
+   /* Save the requested mask to unused ARG4. */
+   ARG4 = ARG3;
+
+   /* Fake the requested sigmask with a block-all mask.  If the syscall
+      succeeds then we will block "all" signals for a few instructions (in
+      syscall-x86-solaris.S) but the correct mask will be almost instantly set
+      again by a call to sigprocmask (also in syscall-x86-solaris.S).  If the
+      syscall fails then the mask is not changed, so everything is ok too. */
+   VG_(sigfillset)(&block_all);
+   ARG3 = (UWord)&block_all;
+
+   /* Check to see if this gave us a pending signal. */
+   *flags |= SfPollAfter;
+
+   if (VG_(clo_trace_signals))
+      VG_(message)(Vg_DebugMsg, "sigresend: resending signal %ld\n", ARG1);
+
+   /* Handle SIGKILL specially. */
+   if (ARG1 == VKI_SIGKILL && ML_(do_sigkill)(tid, -1)) {
+      SET_STATUS_Success(0);
+      return;
+   }
+
+   /* Ask to handle this syscall via the slow route, since that's the only one
+      that sets tst->status to VgTs_WaitSys.  If the result of doing the
+      syscall is an immediate run of async_signalhandler() in m_signals.c,
+      then we need the thread to be properly tidied away. */
+   *flags |= SfMayBlock;
+}
+
+POST(sys_sigresend)
+{
+   /* The syscall succeeded, set the requested mask. */
+   VG_(do_sys_sigprocmask)(tid, VKI_SIG_SETMASK, (vki_sigset_t*)ARG4, NULL);
+
+   if (VG_(clo_trace_signals))
+      VG_(message)(Vg_DebugMsg, "sigresend: resent signal %ld\n", ARG1);
+}
+
+static void mem_priocntlsys_parm_ok(ThreadId tid, Bool pre, Bool reade,
+                                    vki_pc_vaparm_t *parm)
+{
+   if (reade)
+      return;
+
+   if (pre)
+      PRE_FIELD_WRITE("priocntlsys(parm)", parm->pc_parm);
+   else
+      POST_FIELD_WRITE(parm->pc_parm);
+}
+
+static void mem_priocntlsys_parm(ThreadId tid, Bool pre, Bool reade,
+                                 const HChar *clname,
+                                 vki_pc_vaparm_t *parm)
+{
+   /* This function is used to handle the PC_SETXPARMS and PC_GETXPARMS
+      parameters.  In the case of PC_SETXPARMS, the code below merely checks
+      that all parameters are scalar; PRE_MEM_READ() for these parameters is
+      already done by the PC_SETXPARMS handler in PRE(sys_priocntlsys).
+
+      A caller of this function is responsible for checking that clname and
+      &parm->pc_key can be dereferenced. */
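+   /* Illustrative example (hypothetical request, not taken from an actual
+      trace): a PC_GETXPARMS call for the "TS" class with pc_key == TS_KY_UPRI
+      reaches this function with reade == False; the kernel returns the user
+      priority directly in parm->pc_parm, so only that scalar field is flagged
+      (PRE_FIELD_WRITE before the syscall, POST_FIELD_WRITE after it). */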
+
+   if (VG_STREQ(clname, "RT")) {
+      switch (parm->pc_key) {
+      case VKI_RT_KY_PRI:
+      case VKI_RT_KY_TQSECS:
+      case VKI_RT_KY_TQNSECS:
+      case VKI_RT_KY_TQSIG:
+         /* Scalar values that are stored directly in pc_parm. */
+         mem_priocntlsys_parm_ok(tid, pre, reade, parm);
+         return;
+      }
+   }
+   else if (VG_STREQ(clname, "TS")) {
+      switch (parm->pc_key) {
+      case VKI_TS_KY_UPRILIM:
+      case VKI_TS_KY_UPRI:
+         /* Scalar values that are stored directly in pc_parm. */
+         mem_priocntlsys_parm_ok(tid, pre, reade, parm);
+         return;
+      }
+   }
+   else if (VG_STREQ(clname, "IA")) {
+      switch (parm->pc_key) {
+      case VKI_IA_KY_UPRILIM:
+      case VKI_IA_KY_UPRI:
+      case VKI_IA_KY_MODE:
+         /* Scalar values that are stored directly in pc_parm. */
+         mem_priocntlsys_parm_ok(tid, pre, reade, parm);
+         return;
+      }
+   }
+   else if (VG_STREQ(clname, "FSS")) {
+      switch (parm->pc_key) {
+      case VKI_FSS_KY_UPRILIM:
+      case VKI_FSS_KY_UPRI:
+         /* Scalar values that are stored directly in pc_parm. */
+         mem_priocntlsys_parm_ok(tid, pre, reade, parm);
+         return;
+      }
+   }
+   else if (VG_STREQ(clname, "FX")) {
+      switch (parm->pc_key) {
+      case VKI_FX_KY_UPRILIM:
+      case VKI_FX_KY_UPRI:
+      case VKI_FX_KY_TQSECS:
+      case VKI_FX_KY_TQNSECS:
+         /* Scalar values that are stored directly in pc_parm. */
+         mem_priocntlsys_parm_ok(tid, pre, reade, parm);
+         return;
+      }
+   }
+   else {
+      /* Unknown class. */
+      VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s.",
+                         clname);
+      /*NOTREACHED*/
+   }
+
+   /* The class is known but pc_key is unknown. */
+   VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s "
+                      "and pc_key=%d.", clname, parm->pc_key);
+   /*NOTREACHED*/
+}
+
+PRE(sys_priocntlsys)
+{
+   /* long priocntlsys(int pc_version, procset_t *psp, int cmd, caddr_t arg,
+                       caddr_t arg2); */
+
+   if (ARG1 != 1) {
+      /* Only the first version of priocntlsys is supported by the code below.
+       */
+      VG_(unimplemented)("Syswrap of the priocntlsys where pc_version=%ld.",
+                         ARG1);
+      /*NOTREACHED*/
+   }
+
+   PRINT("sys_priocntlsys ( %ld, %#lx, %ld, %#lx, %#lx )", ARG1, ARG2, ARG3,
+         ARG4, ARG5);
+   PRE_REG_READ5(long, "priocntlsys", int, pc_version, procset_t *, psp,
+                 int, cmd, void *, arg, void *, arg2);
+
+   switch (ARG3 /*cmd*/) {
+   case VKI_PC_GETCID:
+      if (ARG4) {
+         vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
+         PRE_MEM_RASCIIZ("priocntlsys(clname)", (Addr)info->pc_clname);
+         /* The next line says that the complete pcinfo_t structure can be
+            written, but this actually isn't true for pc_clname which is
+            always only read. */
+         PRE_MEM_WRITE("priocntlsys(pcinfo)", ARG4, sizeof(vki_pcinfo_t));
+      }
+      break;
+   case VKI_PC_GETCLINFO:
+      if (ARG4) {
+         vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
+         PRE_FIELD_READ("priocntlsys(cid)", info->pc_cid);
+         /* The next line says that the complete pcinfo_t structure can be
+            written, but this actually isn't true for pc_cid which is
+            always only read. */
+         PRE_MEM_WRITE("priocntlsys(pcinfo)", ARG4, sizeof(vki_pcinfo_t));
+      }
+      break;
+   case VKI_PC_SETPARMS:
+      PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
+      /* The next line says that the complete pcparms_t structure is read
+         which is never actually true (we are too pessimistic here).
+         Unfortunately we can't do better because we don't know what
+         process class is involved. */
+      PRE_MEM_READ("priocntlsys(parms)", ARG4, sizeof(vki_pcparms_t));
+      break;
+   case VKI_PC_GETPARMS:
+      PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
+      PRE_MEM_WRITE("priocntlsys(parms)", ARG4, sizeof(vki_pcparms_t));
+      break;
+   case VKI_PC_GETPRIRANGE:
+      {
+         vki_pcpri_t *pcpri = (vki_pcpri_t*)ARG4;
+         PRE_FIELD_READ("priocntlsys(cid)", pcpri->pc_cid);
+      }
+      PRE_MEM_WRITE("priocntlsys(pri)", ARG4, sizeof(vki_pcpri_t));
+      break;
+   case VKI_PC_DONICE:
+      PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
+      {
+         vki_pcnice_t *nicee = (vki_pcnice_t*)ARG4;
+         PRE_FIELD_READ("priocntlsys(op)", nicee->pc_op);
+         if (ML_(safe_to_deref)(&nicee->pc_op, sizeof(nicee->pc_op))) {
+            switch (nicee->pc_op) {
+            case VKI_PC_GETNICE:
+               PRE_FIELD_WRITE("priocntlsys(val)", nicee->pc_val);
+               break;
+            case VKI_PC_SETNICE:
+               PRE_FIELD_READ("priocntlsys(val)", nicee->pc_val);
+               break;
+            default:
+               VG_(unimplemented)("Syswrap of the priocntlsys call where "
+                                  "cmd=PC_DONICE and pc_op=%d", nicee->pc_op);
+               /*NOTREACHED*/
+               break;
+            }
+         }
+      }
+      break;
+   case VKI_PC_SETXPARMS:
+      PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
+      PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
+      if (ARG5) {
+         vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
+         PRE_FIELD_READ("priocntlsys(vaparmscnt)", parms->pc_vaparmscnt);
+         if (ML_(safe_to_deref)(&parms->pc_vaparmscnt,
+                                sizeof(parms->pc_vaparmscnt))) {
+            vki_uint_t i;
+            PRE_MEM_READ("priocntlsys(parms)", (Addr)parms->pc_parms,
+                         parms->pc_vaparmscnt * sizeof(parms->pc_parms[0]));
+            for (i = 0; i < parms->pc_vaparmscnt; i++) {
+               vki_pc_vaparm_t *parm = &parms->pc_parms[i];
+               if (ML_(safe_to_deref)(parm, sizeof(*parm)) &&
+                   ML_(safe_to_deref)((void*)ARG4, 1))
+                  mem_priocntlsys_parm(tid, True /*pre*/, True /*read*/,
+                                       (HChar*)ARG4, parm);
+            }
+         }
+      }
+      break;
+   case VKI_PC_GETXPARMS:
+      PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
+      if (ARG4)
+         PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
+      if (ARG5) {
+         vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
+         PRE_FIELD_READ("priocntlsys(vaparmscnt)", parms->pc_vaparmscnt);
+         if (ML_(safe_to_deref)(&parms->pc_vaparmscnt,
+                                sizeof(parms->pc_vaparmscnt))) {
+            vki_uint_t i;
+            for (i = 0; i < parms->pc_vaparmscnt; i++) {
+               vki_pc_vaparm_t *parm = &parms->pc_parms[i];
+               PRE_MEM_READ("priocntlsys(parms)", (Addr)&parm->pc_key,
+                            parms->pc_vaparmscnt * sizeof(parm->pc_key));
+               if (ML_(safe_to_deref)(&parm->pc_key,
+                                      sizeof(parm->pc_key))) {
+                  /* First handle PC_KY_CLNAME, then class specific keys.
+                     Note that PC_KY_CLNAME can be used only with
+                     ARG4==NULL && parms->pc_vaparmscnt==1.  We are not so
+                     strict here and handle this special case as a regular
+                     one which makes the code simpler. */
+                  if (parm->pc_key == VKI_PC_KY_CLNAME)
+                     PRE_MEM_WRITE("priocntlsys(clname)", parm->pc_parm,
+                                   VKI_PC_CLNMSZ);
+                  else if (ARG4 && ML_(safe_to_deref)((void*)ARG4, 1))
+                     mem_priocntlsys_parm(tid, True /*pre*/,
+                                          False /*read*/, (HChar*)ARG4,
+                                          parm);
+               }
+            }
+         }
+      }
+      break;
+   case VKI_PC_SETDFLCL:
+      PRE_MEM_RASCIIZ("priocntlsys(clname)", ARG4);
+      break;
+   case VKI_PC_GETDFLCL:
+      if (ARG4) {
+         /* GETDFLCL writes to the ARG4 buffer only if ARG4 isn't NULL.  Also
+            note that if ARG4 is NULL then the syscall succeeds. */
+         PRE_MEM_WRITE("priocntlsys(clname)", ARG4, VKI_PC_CLNMSZ);
+      }
+      break;
+   case VKI_PC_DOPRIO:
+      PRE_MEM_READ("priocntlsys(psp)", ARG2, sizeof(vki_procset_t));
+      {
+         vki_pcprio_t *prio = (vki_pcprio_t*)ARG4;
+         PRE_FIELD_READ("priocntlsys(op)", prio->pc_op);
+         if (ML_(safe_to_deref)(&prio->pc_op, sizeof(prio->pc_op))) {
+            switch (prio->pc_op) {
+            case VKI_PC_GETPRIO:
+               PRE_FIELD_WRITE("priocntlsys(cid)", prio->pc_cid);
+               PRE_FIELD_WRITE("priocntlsys(val)", prio->pc_val);
+               break;
+            case VKI_PC_SETPRIO:
+               PRE_FIELD_READ("priocntlsys(cid)", prio->pc_cid);
+               PRE_FIELD_READ("priocntlsys(val)", prio->pc_val);
+               break;
+            default:
+               VG_(unimplemented)("Syswrap of the priocntlsys call where "
+                                  "cmd=PC_DOPRIO and pc_op=%d", prio->pc_op);
+               /*NOTREACHED*/
+               break;
+            }
+         }
+      }
+      break;
+   case VKI_PC_ADMIN:
+   default:
+      VG_(unimplemented)("Syswrap of the priocntlsys call with cmd %ld.", ARG3);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+static void post_mem_write_priocntlsys_clinfo(ThreadId tid,
+                                              const HChar *clname, Addr clinfo)
+{
+   if (VG_STREQ(clname, "RT"))
+      POST_MEM_WRITE(clinfo, sizeof(vki_rtinfo_t));
+   else if (VG_STREQ(clname, "TS"))
+      POST_MEM_WRITE(clinfo, sizeof(vki_tsinfo_t));
+   else if (VG_STREQ(clname, "IA"))
+      POST_MEM_WRITE(clinfo, sizeof(vki_iainfo_t));
+   else if (VG_STREQ(clname, "FSS"))
+      POST_MEM_WRITE(clinfo, sizeof(vki_fssinfo_t));
+   else if (VG_STREQ(clname, "FX"))
+      POST_MEM_WRITE(clinfo, sizeof(vki_fxinfo_t));
+   else if (VG_STREQ(clname, "SDC")) {
+      /* Relax. */
+   }
+   else {
+      VG_(unimplemented)("Syswrap of the priocntlsys call where clname=%s.",
+                         clname);
+      /*NOTREACHED*/
+   }
+}
+
+POST(sys_priocntlsys)
+{
+   switch (ARG3 /*cmd*/) {
+   case VKI_PC_GETCID:
+      if (ARG4) {
+         vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
+         POST_FIELD_WRITE(info->pc_cid);
+         post_mem_write_priocntlsys_clinfo(tid, info->pc_clname,
+                                           (Addr)&info->pc_clinfo);
+      }
+      break;
+   case VKI_PC_GETCLINFO:
+      if (ARG4) {
+         vki_pcinfo_t *info = (vki_pcinfo_t*)ARG4;
+         POST_MEM_WRITE((Addr)info->pc_clname,
+                        VG_(strlen)((HChar*)info->pc_clname) + 1);
+         post_mem_write_priocntlsys_clinfo(tid, info->pc_clname,
+                                           (Addr)&info->pc_clinfo);
+      }
+      break;
+   case VKI_PC_SETPARMS:
+      /* Relax. */
+      break;
+   case VKI_PC_GETPARMS:
+      /* The next line says that the complete pcparms_t structure is
+         written which is never actually true (we are too optimistic here).
+         Unfortunately we can't do better because we don't know what
+         process class is involved. */
+      POST_MEM_WRITE(ARG4, sizeof(vki_pcparms_t));
+      break;
+   case VKI_PC_GETPRIRANGE:
+      POST_MEM_WRITE(ARG4, sizeof(vki_pcpri_t));
+      break;
+   case VKI_PC_DONICE:
+      {
+         vki_pcnice_t *nicee = (vki_pcnice_t*)ARG4;
+         if (nicee->pc_op == VKI_PC_GETNICE)
+            POST_FIELD_WRITE(nicee->pc_val);
+      }
+      break;
+   case VKI_PC_SETXPARMS:
+      /* Relax. */
+      break;
+   case VKI_PC_GETXPARMS:
+      {
+         vki_pc_vaparms_t *parms = (vki_pc_vaparms_t*)ARG5;
+         vki_uint_t i;
+         for (i = 0; i < parms->pc_vaparmscnt; i++) {
+            vki_pc_vaparm_t *parm = &parms->pc_parms[i];
+            if (parm->pc_key == VKI_PC_KY_CLNAME)
+               POST_MEM_WRITE(parm->pc_parm,
+                              VG_(strlen)((HChar*)(Addr)parm->pc_parm) + 1);
+            else if (ARG4)
+               mem_priocntlsys_parm(tid, False /*pre*/, False /*read*/,
+                                    (HChar*)ARG4, parm);
+         }
+      }
+      break;
+   case VKI_PC_SETDFLCL:
+      /* Relax. */
+      break;
+   case VKI_PC_GETDFLCL:
+      if (ARG4)
+         POST_MEM_WRITE(ARG4, VG_(strlen)((HChar*)ARG4) + 1);
+      break;
+   case VKI_PC_DOPRIO:
+      {
+         vki_pcprio_t *prio = (vki_pcprio_t*)ARG4;
+         if (prio->pc_op == VKI_PC_GETPRIO) {
+            POST_FIELD_WRITE(prio->pc_cid);
+            POST_FIELD_WRITE(prio->pc_val);
+         }
+      }
+      break;
+   case VKI_PC_ADMIN:
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_pathconf)
+{
+   /* long pathconf(const char *path, int name); */
+   PRINT("sys_pathconf ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, ARG2);
+   PRE_REG_READ2(long, "pathconf", const char *, path, int, name);
+   PRE_MEM_RASCIIZ("pathconf(path)", ARG1);
+}
+
+PRE(sys_mmap)
+{
+   /* void *mmap(void *addr, size_t len, int prot, int flags,
+                 int fildes, off_t off); */
+   SysRes r;
+   OffT offset;
+
+   /* Stay sane. */
+   vg_assert(VKI_PAGE_SIZE == 4096);
+   vg_assert(sizeof(offset) == sizeof(ARG6));
+
+   PRINT("sys_mmap ( %#lx, %#lx, %#lx, %#lx, %ld, %#lx )",
+         ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
+   PRE_REG_READ6(long, "mmap", void *, start, vki_size_t, length,
+                 int, prot, int, flags, int, fd, vki_off_t, offset);
+
+   /* Make sure that if off < 0 then it's passed correctly to the generic mmap
+      wrapper. */
+   offset = *(OffT*)&ARG6;
+
+   r = ML_(generic_PRE_sys_mmap)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, offset);
+   SET_STATUS_from_SysRes(r);
+}
+
+#if defined(SOLARIS_UUIDSYS_SYSCALL)
+PRE(sys_uuidsys)
+{
+   /* int uuidsys(struct uuid *uuid); */
+   PRINT("sys_uuidsys ( %#lx )", ARG1);
+   PRE_REG_READ1(long, "uuidsys", struct vki_uuid *, uuid);
+   PRE_MEM_WRITE("uuidsys(uuid)", ARG1, sizeof(struct vki_uuid));
+}
+
+POST(sys_uuidsys)
+{
+   POST_MEM_WRITE(ARG1, sizeof(struct vki_uuid));
+}
+#endif /* SOLARIS_UUIDSYS_SYSCALL */
+
+/* Syscall mmapobj emulation. Processes ELF program headers and maps them into
+   the correct place in memory. Not an easy task, though.
+   An ELF program header of PT_LOAD/PT_SUNWBSS type specifies:
+   o p_vaddr  - actually a memory offset
+   o p_memsz  - total segment size, including text, data and BSS
+   o p_filesz - size of the file-based part of the mapping (text and data
+                only); p_memsz - p_filesz is the size of BSS
+   o p_offset - offset into the ELF file where the file-based mapping starts
+
+   Several problematic areas to cover here:
+   1. p_offset can contain a value which is not page-aligned. In that case
+      we mmap a part of the file prior to p_offset to make the start address
+      page-aligned.
+   2. A partially unused page after the file-based mapping must be zeroed.
+   3. The first mapping is flagged with MR_HDR_ELF and needs to contain
+      the ELF header. This information is used and verified by the dynamic
+      linker (ld.so.1). */
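+/* Illustrative example (hypothetical numbers): with a 4K page size, a PT_LOAD
+   header with p_vaddr=0x10c40, p_offset=0xc40, p_filesz=0x2000 and
+   p_memsz=0x3400 does not start on a page boundary.  The code below extends
+   the file mapping backwards by 0xc40 bytes (mr_addr -= 0xc40,
+   mr_msize += 0xc40, mr_offset = 0xc40), zeroes the unused tail of the last
+   file-backed page, and maps the remaining p_memsz - p_filesz bytes of BSS
+   as anonymous, implicitly zeroed pages. */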
+static SysRes mmapobj_process_phdrs(ThreadId tid, Int fd,
+                                    vki_mmapobj_result_t *storage,
+                                    vki_uint_t *elements,
+                                    const VKI_ESZ(Ehdr) *ehdr,
+                                    const VKI_ESZ(Phdr) *phdrs)
+{
+#define ADVANCE_PHDR(ehdr, phdr) \
+   (const VKI_ESZ(Phdr) *) ((const HChar *) (phdr) + (ehdr)->e_phentsize)
+
+   SysRes res;
+   Int i;
+   Int first_segment_idx = -1;
+   UInt idx;
+   UInt segments = 0; /* loadable segments */
+   Addr start_addr = 0;
+   Addr end_addr = 0;
+   SizeT max_align = VKI_PAGE_SIZE;
+
+   /* 1. First pass over phdrs - determine number, span and max alignment. */
+   const VKI_ESZ(Phdr) *phdr = phdrs;
+   for (idx = 0; idx < ehdr->e_phnum; idx++, phdr = ADVANCE_PHDR(ehdr, phdr)) {
+      /* Skip this header if no memory is requested. */
+      if (phdr->p_memsz == 0)
+         continue;
+
+      if ((phdr->p_type == VKI_PT_LOAD) || (phdr->p_type == VKI_PT_SUNWBSS)) {
+         Off64T offset = 0;
+
+         if (VG_(clo_trace_syscalls))
+            VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: "
+                             "program header #%u: addr=%#lx type=%#lx "
+                             "prot=%#lx memsz=%#lx filesz=%#lx file "
+                             "offset=%#lx\n", idx, phdr->p_vaddr,
+                             (UWord) phdr->p_type, (UWord) phdr->p_flags,
+                             phdr->p_memsz, phdr->p_filesz, phdr->p_offset);
+
+         if (segments == 0) {
+            first_segment_idx = idx;
+
+            if (phdr->p_filesz == 0) {
+               VG_(unimplemented)("Syswrap of the mmapobj call with the first "
+                                  "loadable ELF program header specifying "
+                                  "p_filesz == 0");
+               /*NOTREACHED*/
+               return res;
+            }
+
+            /* Address of the first segment must be either NULL or within the
+               first page. */
+            if ((ehdr->e_type == VKI_ET_DYN) &&
+                ((phdr->p_vaddr & VKI_PAGEMASK) != 0)) {
+               if (VG_(clo_trace_syscalls))
+                  VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
+                                   "ELF program header #%u does not land on "
+                                   "the first page (vaddr=%#lx)\n", idx,
+                                   phdr->p_vaddr);
+               return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+            }
+
+            start_addr = phdr->p_vaddr;
+            /* The first segment is mapped from the beginning of the file (to
+               include also the ELF header), so include this memory as well.
+               Later on we flag this mapping with MR_HDR_ELF. */
+            offset = phdr->p_offset;
+         }
+
+         if (phdr->p_align > 1) {
+            if ((phdr->p_vaddr % phdr->p_align) !=
+                (phdr->p_offset % phdr->p_align)) {
+               if (VG_(clo_trace_syscalls))
+                  VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
+                                   "ELF program header #%u does not have "
+                                   "congruent offset and vaddr (vaddr=%#lx "
+                                   "file offset=%#lx align=%#lx)\n", idx,
+                                   phdr->p_vaddr, phdr->p_offset,
+                                   phdr->p_align);
+               return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+            }
+         }
+
+         if (phdr->p_vaddr < end_addr) {
+            if (VG_(clo_trace_syscalls))
+               VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
+                                "ELF program header #%u specifies overlapping "
+                                "address (vaddr=%#lx end_addr=%#lx)\n",
+                                idx, phdr->p_vaddr, end_addr);
+            return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+         }
+
+         end_addr = phdr->p_vaddr + phdr->p_memsz + offset;
+         end_addr = VG_PGROUNDUP(end_addr);
+         if (phdr->p_align > max_align) {
+            max_align = phdr->p_align;
+         }
+
+         segments += 1;
+      }
+   }
+
+   /* Alignment check - it should be a power of two. */
+   if ((max_align & (max_align - 1)) != 0) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: alignment "
+                          "is not a power of 2 (%#lx)\n", max_align);
+      return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+   }
+   vg_assert(max_align >= VKI_PAGE_SIZE);
+
+#if defined(VGP_x86_solaris)
+   if (max_align > VKI_UINT_MAX) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: alignment "
+                          "for 32-bit ELF is >32-bits (%#lx)\n", max_align);
+      return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+   }
+#endif /* VGP_x86_solaris */
+
+   if (segments == 0) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: nothing "
+                          "to map (0 loadable segments)\n");
+      return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+   }
+
+   vg_assert(end_addr >= start_addr);
+   SizeT span = end_addr - start_addr;
+   if (span == 0) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: nothing "
+                          "to map (%u loadable segments spanning 0 bytes)\n",
+                          segments);
+      return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+   }
+   vg_assert(first_segment_idx >= 0);
+
+   if (segments > *elements) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: too many "
+                          "segments (%u)\n", segments);
+      return VG_(mk_SysRes_Error)(VKI_E2BIG);
+   }
+
+   if (VG_(clo_trace_syscalls))
+      VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: there "
+                       "are %u loadable segments spanning %#lx bytes; max "
+                       "align is %#lx\n", segments, span, max_align);
+
+   /* Now get the aspacemgr oraculum advisory.
+      Later on we mmap file-based and BSS mappings into this address space area
+      as required and leave the holes unmapped. */
+   MapRequest mreq = {MAlign, max_align, span};
+   Bool ok;
+   start_addr = VG_(am_get_advisory)(&mreq, True /* forClient */, &ok);
+   if (!ok) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
+                          "failed to reserve address space of %#lx bytes "
+                          "with alignment %#lx\n", span, max_align);
+      return VG_(mk_SysRes_Error)(VKI_ENOMEM);
+   }
+   vg_assert(VG_ROUNDUP(start_addr, max_align) == start_addr);
+
+   if (VG_(clo_trace_syscalls))
+      VG_(debugLog)(2, "syswrap-solaris", "PRE(sys_mmapobj): address space "
+                       "reserved at: vaddr=%#lx size=%#lx\n", start_addr, span);
+
+   /* This is an utterly ugly hack: the aspacemgr assumes that only one
+      segment is added at a time. However, we add multiple segments here, so
+      AM_SANITY_CHECK inside the aspacemgr can easily fail. To prevent that,
+      we disable these checks. The scheduler will check the aspacemgr sanity
+      after the syscall. */
+   UInt sanity_level = VG_(clo_sanity_level);
+   VG_(clo_sanity_level) = 1;
+
+   /* 2. Second pass over phdrs - map the program headers and fill in
+         the mmapobj_result_t array. */
+   phdr = phdrs;
+   *elements = 0;
+   for (idx = 0; idx < ehdr->e_phnum; idx++, phdr = ADVANCE_PHDR(ehdr, phdr)) {
+      /* Skip this header if no memory is requested. */
+      if (phdr->p_memsz == 0)
+         continue;
+
+      if ((phdr->p_type == VKI_PT_LOAD) || (phdr->p_type == VKI_PT_SUNWBSS)) {
+         UInt prot = 0;
+         if (phdr->p_flags & VKI_PF_R)
+            prot |= VKI_PROT_READ;
+         if (phdr->p_flags & VKI_PF_W)
+            prot |= VKI_PROT_WRITE;
+         if (phdr->p_flags & VKI_PF_X)
+            prot |= VKI_PROT_EXEC;
+
+         vki_mmapobj_result_t *mrp = &storage[*elements];
+         mrp->mr_addr = (vki_caddr_t) (start_addr + phdr->p_vaddr);
+         mrp->mr_msize = phdr->p_memsz;
+         mrp->mr_fsize = phdr->p_filesz;
+         mrp->mr_offset = 0;
+         mrp->mr_prot = prot;
+         mrp->mr_flags = 0;
+         Off64T file_offset = phdr->p_offset;
+         if (idx == first_segment_idx) {
+            mrp->mr_flags = VKI_MR_HDR_ELF;
+            if (phdr->p_offset > 0) {
+               /* Include the ELF header into the first segment.
+                  This means we ignore p_offset from the program header
+                  and map from file offset 0. */
+               mrp->mr_msize += phdr->p_offset;
+               mrp->mr_fsize += phdr->p_offset;
+               file_offset = 0;
+            }
+         }
+
+         SizeT page_offset = (Addr) mrp->mr_addr & VKI_PAGEOFFSET;
+         if (page_offset > 0) {
+            vg_assert(file_offset >= page_offset);
+            /* The mapping address does not start at the beginning of a page.
+               Therefore include some bytes before it to make it page-aligned. */
+            mrp->mr_addr -= page_offset;
+            mrp->mr_msize += page_offset;
+            mrp->mr_offset = page_offset;
+            file_offset -= page_offset;
+         }
+         SizeT file_size = mrp->mr_fsize + mrp->mr_offset;
+         if (VG_(clo_trace_syscalls))
+            VG_(debugLog)(2, "syswrap-solaris", "mmapobj_process_phdrs: "
+                             "mmapobj result #%u: addr=%#lx msize=%#lx "
+                             "fsize=%#lx mr_offset=%#lx prot=%#x flags=%#x\n",
+                             *elements, (Addr) mrp->mr_addr,
+                             (UWord) mrp->mr_msize, (UWord) mrp->mr_fsize,
+                             (UWord) mrp->mr_offset, mrp->mr_prot,
+                             mrp->mr_flags);
+
+         UInt flags = VKI_MAP_PRIVATE | VKI_MAP_FIXED;
+         if ((mrp->mr_prot & (VKI_PROT_WRITE | VKI_PROT_EXEC)) ==
+                                                               VKI_PROT_EXEC) {
+            flags |= VKI_MAP_TEXT;
+         } else {
+            flags |= VKI_MAP_INITDATA;
+         }
+
+         /* Determine if there will be a partially unused page after the
+            file-based mapping. If so, we need to zero it explicitly
+            afterwards. */
+         Addr mapping_end = (Addr) mrp->mr_addr + file_size;
+         SizeT zeroed_size = VG_PGROUNDUP(mapping_end) - mapping_end;
+         Bool mprotect_needed = False;
+         if ((zeroed_size > 0) && ((prot & VKI_PROT_WRITE) == 0)) {
+            prot |= VKI_PROT_WRITE;
+            mprotect_needed = True;
+         }
+
+         if (file_size > 0) {
+            res = VG_(am_mmap_file_fixed_client_flags)((Addr) mrp->mr_addr,
+                                       file_size, prot, flags, fd, file_offset);
+            if (sr_isError(res)) {
+               if (VG_(clo_trace_syscalls))
+                  VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
+                                   "mmap failed: addr=%#lx size=%#lx prot=%#x "
+                                   "flags=%#x fd=%d file offset=%#llx\n",
+                                   (Addr) mrp->mr_addr, file_size,
+                                   prot, flags, fd, file_offset);
+               goto mmap_error;
+            }
+
+            VG_(debugLog)(1, "syswrap-solaris", "PRE(sys_mmapobj): new "
+                             "segment: vaddr=%#lx size=%#lx prot=%#x "
+                             "flags=%#x fd=%d file offset=%#llx\n",
+                             (Addr) mrp->mr_addr, file_size, mrp->mr_prot,
+                             flags, fd, file_offset);
+         }
+
+         if (zeroed_size > 0) {
+            /* Now zero out the end of partially used page. */
+            VG_(memset)((void *) mapping_end, 0, zeroed_size);
+            if (mprotect_needed) {
+               prot &= ~VKI_PROT_WRITE;
+               res = VG_(do_syscall3)(SYS_mprotect, (Addr) mrp->mr_addr,
+                                      file_size, prot);
+               if (sr_isError(res)) {
+                  if (VG_(clo_trace_syscalls))
+                     VG_(debugLog)(3, "syswrap-solaris",
+                                      "mmapobj_process_phdrs: mprotect failed: "
+                                      "addr=%#lx size=%#lx prot=%#x\n",
+                                      (Addr) mrp->mr_addr, file_size, prot);
+                  /* Mapping for this segment was already established. */
+                  idx += 1;
+                  goto mmap_error;
+               }
+            }
+         }
+
+         if (file_size > 0) {
+            ML_(notify_core_and_tool_of_mmap)((Addr) mrp->mr_addr, file_size,
+                                              prot, flags, fd, file_offset);
+         }
+
+         /* Pages after the mapping backed by the file are part of BSS.
+            They need to be mmap'ed over with the correct flags and will be
+            implicitly zeroed. */
+         mapping_end = VG_PGROUNDUP(mrp->mr_addr + mrp->mr_msize);
+         Addr page_end = VG_PGROUNDUP(mrp->mr_addr + file_size);
+         vg_assert(mapping_end >= page_end);
+         zeroed_size = mapping_end - page_end;
+         if (zeroed_size > 0) {
+            flags = VKI_MAP_FIXED | VKI_MAP_PRIVATE | VKI_MAP_ANONYMOUS;
+            res = VG_(am_mmap_anon_fixed_client)(page_end, zeroed_size, prot);
+            if (sr_isError(res)) {
+               if (VG_(clo_trace_syscalls))
+                  VG_(debugLog)(3, "syswrap-solaris", "mmapobj_process_phdrs: "
+                                   "mmap_anon failed: addr=%#lx size=%#lx "
+                                   "prot=%#x\n", page_end, zeroed_size, prot);
+               idx += 1; /* mapping for this segment was already established */
+               goto mmap_error;
+            }
+
+            VG_(debugLog)(1, "syswrap-solaris", "PRE(sys_mmapobj): new "
+                             "anonymous segment (BSS): vaddr=%#lx size=%#lx "
+                             "prot=%#x\n", page_end, zeroed_size, prot);
+            ML_(notify_core_and_tool_of_mmap)(page_end, zeroed_size,
+                                              prot, flags, -1, 0);
+         }
+
+         VG_(di_notify_mmap)((Addr) mrp->mr_addr, False /*allow_SkFileV*/, fd);
+
+         *elements += 1;
+         vg_assert(*elements <= segments);
+      }
+   }
+
+   /* Restore VG_(clo_sanity_level). The scheduler will perform the aspacemgr
+      sanity check after the syscall. */
+   VG_(clo_sanity_level) = sanity_level;
+
+   return VG_(mk_SysRes_Success)(0);
+
+mmap_error:
+   for (i = idx - 1; i > 0; i--) {
+      Bool discard_translations;
+      Addr addr = (Addr) storage[i].mr_addr;
+
+      VG_(am_munmap_client)(&discard_translations, addr, storage[i].mr_msize);
+      ML_(notify_core_and_tool_of_munmap)(addr, storage[i].mr_msize);
+   }
+   *elements = 0;
+   return res;
+
+#undef ADVANCE_PHDR
+}
+
+static SysRes mmapobj_interpret(ThreadId tid, Int fd,
+                                vki_mmapobj_result_t *storage,
+                                vki_uint_t *elements)
+{
+   SysRes res;
+
+   struct vg_stat stats;
+   if (VG_(fstat)(fd, &stats) != 0) {
+      return VG_(mk_SysRes_Error)(VKI_EBADF);
+   }
+
+   if (stats.size < sizeof(VKI_ESZ(Ehdr))) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: insufficient "
+                          "file size (%lld)\n", stats.size);
+      return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+   }
+
+   /* Align the header buffer appropriately. */
+   vki_ulong_t lheader[sizeof(VKI_ESZ(Ehdr)) / sizeof(vki_ulong_t) + 1];
+   HChar *header = (HChar *) &lheader;
+
+   res = VG_(pread)(fd, header, sizeof(VKI_ESZ(Ehdr)), 0);
+   if (sr_isError(res)) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
+                          "header failed\n");
+      return res;
+   } else if (sr_Res(res) != sizeof(VKI_ESZ(Ehdr))) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
+                          "header failed - only %lu bytes out of %lu\n",
+                          sr_Res(res), (UWord) sizeof(VKI_ESZ(Ehdr)));
+      return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+   }
+
+   /* Verify file type is ELF. */
+   if ((header[VKI_EI_MAG0] != VKI_ELFMAG0) ||
+       (header[VKI_EI_MAG1] != VKI_ELFMAG1) ||
+       (header[VKI_EI_MAG2] != VKI_ELFMAG2) ||
+       (header[VKI_EI_MAG3] != VKI_ELFMAG3)) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF header "
+                          "missing magic\n");
+      return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+   }
+
+   if (header[VKI_EI_CLASS] != VG_ELF_CLASS) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF class "
+                          "mismatch (%u vs %u)\n", header[VKI_EI_CLASS],
+                          VG_ELF_CLASS);
+      return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+   }
+
+   VKI_ESZ(Ehdr) *ehdr = (VKI_ESZ(Ehdr) *) header;
+   if (ehdr->e_type != VKI_ET_DYN) {
+      VG_(unimplemented)("Syswrap of the mmapobj call with ELF type %u.",
+                         ehdr->e_type);
+      /*NOTREACHED*/
+      return res;
+   }
+
+   if (ehdr->e_phnum == VKI_PN_XNUM) {
+      VG_(unimplemented)("Syswrap of the mmapobj call with number of ELF "
+                         "program headers == PN_XNUM");
+      /*NOTREACHED*/
+      return res;
+   }
+
+   /* Check alignment. */
+#if defined(VGP_x86_solaris)
+   if (!VG_IS_4_ALIGNED(ehdr->e_phentsize)) {
+#elif defined(VGP_amd64_solaris)
+   if (!VG_IS_8_ALIGNED(ehdr->e_phentsize)) {
+#else
+#  error "Unknown platform"
+#endif
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: ELF header "
+                          "phentsize not aligned properly (%u)\n",
+                          ehdr->e_phentsize);
+      return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+   }
+
+   SizeT phdrs_size = ehdr->e_phnum * ehdr->e_phentsize;
+   if (phdrs_size == 0) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: no ELF "
+                          "program headers\n");
+      return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+   }
+
+   VKI_ESZ(Phdr) *phdrs = VG_(malloc)("syswrap.mi.1", phdrs_size);
+   res = VG_(pread)(fd, phdrs, phdrs_size, ehdr->e_phoff);
+   if (sr_isError(res)) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
+                          "program headers failed\n");
+      VG_(free)(phdrs);
+      return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+   } else if (sr_Res(res) != phdrs_size) {
+      if (VG_(clo_trace_syscalls))
+         VG_(debugLog)(3, "syswrap-solaris", "mmapobj_interpret: read of ELF "
+                          "program headers failed - only %lu bytes out of %lu\n",
+                          sr_Res(res), phdrs_size);
+      VG_(free)(phdrs);
+      return VG_(mk_SysRes_Error)(VKI_ENOTSUP);
+   }
+
+   if (VG_(clo_trace_syscalls))
+      VG_(debugLog)(2, "syswrap-solaris", "mmapobj_interpret: %u ELF "
+                       "program headers with total size of %lu bytes\n",
+                       ehdr->e_phnum, phdrs_size);
+
+   /* Now process the program headers. */
+   res = mmapobj_process_phdrs(tid, fd, storage, elements, ehdr, phdrs);
+   VG_(free)(phdrs);
+   return res;
+}
+
+PRE(sys_mmapobj)
+{
+   /* int mmapobj(int fd, uint_t flags, mmapobj_result_t *storage,
+                  uint_t *elements, void *arg); */
+   PRINT("sys_mmapobj ( %ld, %#lx, %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3,
+         ARG4, ARG5);
+   PRE_REG_READ5(long, "mmapobj", int, fd, vki_uint_t, flags,
+                 mmapobj_result_t *, storage, uint_t *, elements,
+                 void *, arg);
+
+   PRE_MEM_READ("mmapobj(elements)", ARG4, sizeof(vki_uint_t));
+   /*PRE_MEM_WRITE("mmapobj(elements)", ARG4, sizeof(vki_uint_t));*/
+   if (ML_(safe_to_deref)((void*)ARG4, sizeof(vki_uint_t))) {
+      vki_uint_t *u = (vki_uint_t*)ARG4;
+      PRE_MEM_WRITE("mmapobj(storage)", ARG3,
+                    *u * sizeof(vki_mmapobj_result_t));
+   }
+
+   if (ARG2 & VKI_MMOBJ_PADDING)
+      PRE_MEM_READ("mmapobj(arg)", ARG5, sizeof(vki_size_t));
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "mmapobj", tid, False)) {
+      SET_STATUS_Failure(VKI_EBADF);
+      return;
+   }
+
+   /* We cannot advise mmapobj about the desired address(es). Unfortunately
+      the kernel places mappings from mmapobj at the end of the process
+      address space, defeating memcheck's optimized fast 2-level array
+      algorithm. So we need to emulate what mmapobj does in the kernel. */
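+   /* A typical caller is the dynamic linker.  The call then looks roughly
+      like this (illustrative sketch only, not taken from ld.so.1 sources):
+
+         uint_t elements = 1024;
+         mmapobj_result_t storage[1024];
+         mmapobj(fd, MMOBJ_INTERPRET, storage, &elements, NULL);
+
+      On success, all loadable segments of the ELF object referenced by fd are
+      mapped and described in storage[0 .. elements - 1]. */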
+
+   /* Sanity check on parameters. */
+   if ((ARG2 & ~VKI_MMOBJ_ALL_FLAGS) != 0) {
+      SET_STATUS_Failure(VKI_EINVAL);
+      return;
+   }
+
+   if (!ML_(safe_to_deref)((void *) ARG4, sizeof(vki_uint_t))) {
+      SET_STATUS_Failure(VKI_EFAULT);
+      return;
+   }
+   vki_uint_t *elements = (vki_uint_t *) ARG4;
+
+   if (*elements > 0) {
+      if (!ML_(safe_to_deref)((void *) ARG3,
+                              *elements * sizeof(vki_mmapobj_result_t))) {
+         SET_STATUS_Failure(VKI_EFAULT);
+         return;
+      }
+   }
+
+   /* For now, only MMOBJ_INTERPRET without MMOBJ_PADDING is supported. */
+   if (ARG2 != VKI_MMOBJ_INTERPRET) {
+      VG_(unimplemented)("Syswrap of the mmapobj call with flags %ld.", ARG2);
+      /*NOTREACHED*/
+      return;
+   }
+
+   SysRes res = mmapobj_interpret(tid, (Int) ARG1,
+                                  (vki_mmapobj_result_t *) ARG3, elements);
+   SET_STATUS_from_SysRes(res);
+
+   if (!sr_isError(res)) {
+      POST_MEM_WRITE(ARG4, sizeof(vki_uint_t));
+
+      UInt idx;
+      for (idx = 0; idx < *(vki_uint_t *) ARG4; idx++) {
+         vki_mmapobj_result_t *mrp = &((vki_mmapobj_result_t *) ARG3)[idx];
+         POST_FIELD_WRITE(mrp->mr_addr);
+         POST_FIELD_WRITE(mrp->mr_msize);
+         POST_FIELD_WRITE(mrp->mr_fsize);
+         POST_FIELD_WRITE(mrp->mr_prot);
+         POST_FIELD_WRITE(mrp->mr_flags);
+         POST_FIELD_WRITE(mrp->mr_offset);
+      }
+   }
+}
+
+PRE(sys_memcntl)
+{
+   /* int memcntl(caddr_t addr, size_t len, int cmd, caddr_t arg,
+                  int attr, int mask); */
+   PRINT("sys_memcntl ( %#lx, %#lx, %ld, %#lx, %#lx, %#lx )", ARG1, ARG2,
+         ARG3, ARG4, ARG5, ARG6);
+   PRE_REG_READ6(long, "memcntl", void *, addr, vki_size_t, len, int, cmd,
+                 void *, arg, int, attr, int, mask);
+
+   if (ARG3 != VKI_MC_LOCKAS && ARG3 != VKI_MC_UNLOCKAS &&
+       !ML_(valid_client_addr)(ARG1, ARG2, tid, "memcntl")) {
+      /* MC_LOCKAS and MC_UNLOCKAS work on the complete address space, thus we
+         don't check the address range validity if these commands are
+         requested. */
+      SET_STATUS_Failure(VKI_ENOMEM);
+      return;
+   }
+
+   if (ARG3 == VKI_MC_HAT_ADVISE)
+      PRE_MEM_READ("memcntl(arg)", ARG4, sizeof(struct vki_memcntl_mha));
+}
+
+PRE(sys_getpmsg)
+{
+   /* int getpmsg(int fildes, struct strbuf *ctlptr, struct strbuf *dataptr,
+                  int *bandp, int *flagsp); */
+   struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
+   struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
+   *flags |= SfMayBlock;
+   PRINT("sys_getpmsg ( %ld, %#lx, %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3,
+         ARG4, ARG5);
+   PRE_REG_READ5(long, "getpmsg", int, fildes, struct vki_strbuf *, ctlptr,
+                 struct vki_strbuf *, dataptr, int *, bandp, int *, flagsp);
+   if (ctrlptr) {
+      PRE_FIELD_READ("getpmsg(ctrlptr->maxlen)", ctrlptr->maxlen);
+      PRE_FIELD_WRITE("getpmsg(ctrlptr->len)", ctrlptr->len);
+      PRE_FIELD_READ("getpmsg(ctrlptr->buf)", ctrlptr->buf);
+      if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
+          && ctrlptr->maxlen > 0)
+         PRE_MEM_WRITE("getpmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
+                       ctrlptr->maxlen);
+   }
+   if (dataptr) {
+      PRE_FIELD_READ("getpmsg(dataptr->maxlen)", dataptr->maxlen);
+      PRE_FIELD_WRITE("getpmsg(dataptr->len)", dataptr->len);
+      PRE_FIELD_READ("getpmsg(dataptr->buf)", dataptr->buf);
+      if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
+          && dataptr->maxlen > 0)
+         PRE_MEM_WRITE("getpmsg(dataptr->buf)", (Addr)dataptr->buf,
+                       dataptr->maxlen);
+   }
+   PRE_MEM_READ("getpmsg(bandp)", ARG4, sizeof(int));
+   /*PRE_MEM_WRITE("getpmsg(bandp)", ARG4, sizeof(int));*/
+   PRE_MEM_READ("getpmsg(flagsp)", ARG5, sizeof(int));
+   /*PRE_MEM_WRITE("getpmsg(flagsp)", ARG5, sizeof(int));*/
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "getpmsg", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_getpmsg)
+{
+   struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
+   struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
+
+   if (ctrlptr && ctrlptr->len > 0)
+      POST_MEM_WRITE((Addr)ctrlptr->buf, ctrlptr->len);
+   if (dataptr && dataptr->len > 0)
+      POST_MEM_WRITE((Addr)dataptr->buf, dataptr->len);
+   POST_MEM_WRITE(ARG4, sizeof(int));
+   POST_MEM_WRITE(ARG5, sizeof(int));
+}
+
+PRE(sys_putpmsg)
+{
+   /* int putpmsg(int fildes, const struct strbuf *ctlptr,
+                  const struct strbuf *dataptr, int band, int flags); */
+   struct vki_strbuf *ctrlptr = (struct vki_strbuf *)ARG2;
+   struct vki_strbuf *dataptr = (struct vki_strbuf *)ARG3;
+   *flags |= SfMayBlock;
+   PRINT("sys_putpmsg ( %ld, %#lx, %#lx, %ld, %ld )", ARG1, ARG2, ARG3, ARG4,
+         ARG5);
+   PRE_REG_READ5(long, "putpmsg", int, fildes, struct vki_strbuf *, ctrlptr,
+                 struct vki_strbuf *, dataptr, int, band, int, flags);
+   if (ctrlptr) {
+      PRE_FIELD_READ("putpmsg(ctrlptr->len)", ctrlptr->len);
+      PRE_FIELD_READ("putpmsg(ctrlptr->buf)", ctrlptr->buf);
+      if (ML_(safe_to_deref)((void*)ARG2, sizeof(struct vki_strbuf))
+          && ctrlptr->len > 0)
+         PRE_MEM_READ("putpmsg(ctrlptr->buf)", (Addr)ctrlptr->buf,
+                      ctrlptr->len);
+   }
+   if (dataptr) {
+      PRE_FIELD_READ("putpmsg(dataptr->len)", dataptr->len);
+      PRE_FIELD_READ("putpmsg(dataptr->buf)", dataptr->buf);
+      if (ML_(safe_to_deref)((void*)ARG3, sizeof(struct vki_strbuf))
+          && dataptr->len > 0)
+         PRE_MEM_READ("putpmsg(dataptr->buf)", (Addr)dataptr->buf,
+                      dataptr->len);
+   }
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "putpmsg", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+#if defined(SOLARIS_OLD_SYSCALLS)
+PRE(sys_rename)
+{
+   /* int rename(const char *from, const char *to); */
+
+   *flags |= SfMayBlock;
+   PRINT("sys_rename ( %#lx(%s), %#lx(%s) )",
+         ARG1, (HChar *) ARG1, ARG2, (char *) ARG2);
+   PRE_REG_READ2(long, "rename", const char *, from, const char *, to);
+
+   PRE_MEM_RASCIIZ("rename(from)", ARG1);
+   PRE_MEM_RASCIIZ("rename(to)", ARG2);
+}
+#endif /* SOLARIS_OLD_SYSCALLS */
+
+PRE(sys_uname)
+{
+   /* int uname(struct utsname *name); */
+   PRINT("sys_uname ( %#lx )", ARG1);
+   PRE_REG_READ1(long, "uname", struct vki_utsname *, name);
+   PRE_MEM_WRITE("uname(name)", ARG1, sizeof(struct vki_utsname));
+}
+
+POST(sys_uname)
+{
+   struct vki_utsname *name = (struct vki_utsname *) ARG1;
+   POST_MEM_WRITE((Addr) name->sysname, VG_(strlen)(name->sysname) + 1);
+   POST_MEM_WRITE((Addr) name->nodename, VG_(strlen)(name->nodename) + 1);
+   POST_MEM_WRITE((Addr) name->release, VG_(strlen)(name->release) + 1);
+   POST_MEM_WRITE((Addr) name->version, VG_(strlen)(name->version) + 1);
+   POST_MEM_WRITE((Addr) name->machine, VG_(strlen)(name->machine) + 1);
+}
+
+PRE(sys_setegid)
+{
+   /* int setegid(gid_t egid); */
+   PRINT("sys_setegid ( %ld )", ARG1);
+   PRE_REG_READ1(long, "setegid", vki_gid_t, egid);
+}
+
+PRE(sys_sysconfig)
+{
+   /* long sysconf(int name); */
+   PRINT("sys_sysconfig ( %ld )", ARG1);
+   PRE_REG_READ1(long, "sysconf", int, name);
+
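+   /* For the open-files limit, report Valgrind's soft fd limit: the
+      descriptors above it are reserved for Valgrind's own use and must stay
+      hidden from the client. */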
+   if (ARG1 == VKI_CONFIG_OPEN_FILES)
+      SET_STATUS_Success(VG_(fd_soft_limit));
+}
+
+PRE(sys_systeminfo)
+{
+   /* int sysinfo(int command, char *buf, long count); */
+   PRINT("sys_systeminfo ( %ld, %#lx, %ld )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "sysinfo", int, command, char *, buf, long, count);
+
+   switch (ARG1 /*command*/) {
+   case VKI_SI_SYSNAME:
+   case VKI_SI_HOSTNAME:
+   case VKI_SI_RELEASE:
+   case VKI_SI_VERSION:
+   case VKI_SI_MACHINE:
+   case VKI_SI_ARCHITECTURE:
+   case VKI_SI_HW_SERIAL:
+   case VKI_SI_HW_PROVIDER:
+   case VKI_SI_SRPC_DOMAIN:
+   case VKI_SI_PLATFORM:
+   case VKI_SI_ISALIST:
+   case VKI_SI_DHCP_CACHE:
+   case VKI_SI_ARCHITECTURE_32:
+   case VKI_SI_ARCHITECTURE_64:
+   case VKI_SI_ARCHITECTURE_K:
+   case VKI_SI_ARCHITECTURE_NATIVE:
+      PRE_MEM_WRITE("sysinfo(buf)", ARG2, ARG3);
+      break;
+
+   case VKI_SI_SET_HOSTNAME:
+   case VKI_SI_SET_SRCP_DOMAIN:
+      PRE_MEM_RASCIIZ("sysinfo(buf)", ARG2);
+      break;
+
+   default:
+      VG_(unimplemented)("Syswrap of the sysinfo call with command %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_systeminfo)
+{
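+   /* RES is the length of the requested string (it may exceed the supplied
+      buffer); at most ARG3 (count) bytes were actually copied out, hence the
+      MIN() below. */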
+   if (ARG1 != VKI_SI_SET_HOSTNAME && ARG1 != VKI_SI_SET_SRCP_DOMAIN)
+      POST_MEM_WRITE(ARG2, MIN(RES, ARG3));
+}
+
+PRE(sys_seteuid)
+{
+   /* int seteuid(uid_t euid); */
+   PRINT("sys_seteuid ( %ld )", ARG1);
+   PRE_REG_READ1(long, "seteuid", vki_uid_t, euid);
+}
+
+PRE(sys_forksys)
+{
+   /* int64_t forksys(int subcode, int flags); */
+   Int fds[2];
+   Int res;
+   PRINT("sys_forksys ( %ld, %ld )", ARG1, ARG2);
+   PRE_REG_READ2(long, "forksys", int, subcode, int, flags);
+
+   if (ARG1 == 1) {
+      /* Support for forkall() requires changes to the big lock processing
+         which are not yet implemented. */
+      VG_(unimplemented)("Support for forkall().");
+      /*NOTREACHED*/
+      return;
+   }
+
+   if (ARG1 != 0 && ARG1 != 2) {
+      VG_(unimplemented)("Syswrap of the forksys call where subcode=%ld.",
+                         ARG1);
+      /*NOTREACHED*/
+   }
+
+   if (ARG1 == 2) {
+      /* vfork() is requested. Translate it to a normal fork(), but work
+         around a problem with posix_spawn(), which relies on the real vfork()
+         behaviour. See the description in vg_preloaded.c for details. */
+      res = VG_(pipe)(fds);
+      vg_assert(res == 0);
+
+      vg_assert(fds[0] != fds[1]);
+
+      /* Move to Valgrind fds and set the close-on-exec flag on both of them
+         (done by VG_(safe_fd)). */
+      fds[0] = VG_(safe_fd)(fds[0]);
+      fds[1] = VG_(safe_fd)(fds[1]);
+      vg_assert(fds[0] != fds[1]);
+
+      vg_assert(VG_(vfork_fildes_addr) != NULL);
+      vg_assert(*VG_(vfork_fildes_addr) == -1);
+      *VG_(vfork_fildes_addr) = fds[0];
+   }
+
+   VG_(do_atfork_pre)(tid);
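+   /* Perform the fork.  Subcode 0 (plain fork) is always passed so that a
+      requested vfork is carried out as an ordinary fork (see above). */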
+   SET_STATUS_from_SysRes(VG_(do_syscall2)(__NR_forksys, 0, ARG2));
+
+   if (!SUCCESS) {
+      /* vfork */
+      if (ARG1 == 2) {
+         VG_(close)(fds[0]);
+         VG_(close)(fds[1]);
+      }
+
+      return;
+   }
+
+   if (RESHI) {
+      VG_(do_atfork_child)(tid);
+
+      /* If --child-silent-after-fork=yes was specified, set the output file
+         descriptors to 'impossible' values.  This is noticed by
+         send_bytes_to_logging_sink() in m_libcprint.c, which duly stops
+         writing any further output. */
+      if (VG_(clo_child_silent_after_fork)) {
+         if (!VG_(log_output_sink).is_socket)
+            VG_(log_output_sink).fd = -1;
+         if (!VG_(xml_output_sink).is_socket)
+            VG_(xml_output_sink).fd = -1;
+      }
+
+      /* vfork */
+      if (ARG1 == 2)
+         VG_(close)(fds[1]);
+   }
+   else {
+      VG_(do_atfork_parent)(tid);
+
+      /* Print information about the fork. */
+      PRINT("   fork: process %d created child %d\n", VG_(getpid)(),
+            (Int)RES);
+
+      /* vfork */
+      if (ARG1 == 2) {
+         /* Wait for the child to finish (exec or exit). */
+         UChar w;
+
+         VG_(close)(fds[0]);
+
+         res = VG_(read)(fds[1], &w, 1);
+         if (res == 1)
+            SET_STATUS_Failure(w);
+         VG_(close)(fds[1]);
+
+         *VG_(vfork_fildes_addr) = -1;
+      }
+   }
+}
+
+PRE(sys_sigtimedwait)
+{
+   /* int sigtimedwait(const sigset_t *set, siginfo_t *info,
+                       const timespec_t *timeout); */
+   *flags |= SfMayBlock;
+   PRINT("sys_sigtimedwait ( %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "sigtimedwait", vki_sigset_t *, set,
+                 vki_siginfo_t *, info, vki_timespec_t *, timeout);
+   PRE_MEM_READ("sigtimewait(set)", ARG1, sizeof(vki_sigset_t));
+   if (ARG2)
+      PRE_MEM_WRITE("sigtimedwait(info)", ARG2, sizeof(vki_siginfo_t));
+   if (ARG3)
+      PRE_MEM_READ("sigtimedwait(timeout)", ARG3, sizeof(vki_timespec_t));
+}
+
+POST(sys_sigtimedwait)
+{
+   if (ARG2)
+      POST_MEM_WRITE(ARG2, sizeof(vki_siginfo_t));
+}
+
+PRE(sys_yield)
+{
+   /* void yield(void); */
+   *flags |= SfMayBlock;
+   PRINT("sys_yield ( )");
+   PRE_REG_READ0(long, "yield");
+}
+
+PRE(sys_lwp_sema_post)
+{
+   /* int lwp_sema_post(lwp_sema_t *sema); */
+   vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
+   *flags |= SfMayBlock;
+   PRINT("sys_lwp_sema_post ( %#lx )", ARG1);
+   PRE_REG_READ1(long, "lwp_sema_post", lwp_sema_t *, sema);
+
+   PRE_FIELD_READ("lwp_sema_post(sema->type)", sema->vki_sema_type);
+   PRE_FIELD_READ("lwp_sema_post(sema->count)", sema->vki_sema_count);
+   /*PRE_FIELD_WRITE("lwp_sema_post(sema->count)", sema->vki_sema_count);*/
+   PRE_FIELD_READ("lwp_sema_post(sema->waiters)", sema->vki_sema_waiters);
+   /*PRE_FIELD_WRITE("lwp_sema_post(sema->waiters)", sema->vki_sema_waiters);*/
+}
+
+POST(sys_lwp_sema_post)
+{
+   vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
+   POST_FIELD_WRITE(sema->vki_sema_count);
+   POST_FIELD_WRITE(sema->vki_sema_waiters);
+}
+
+PRE(sys_lwp_sema_trywait)
+{
+   /* int lwp_sema_trywait(lwp_sema_t *sema); */
+   vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
+   PRINT("sys_lwp_sema_trywait ( %#lx )", ARG1);
+   PRE_REG_READ1(long, "lwp_sema_trywait", lwp_sema_t *, sema);
+
+   PRE_FIELD_READ("lwp_sema_trywait(sema->type)", sema->vki_sema_type);
+   PRE_FIELD_READ("lwp_sema_trywait(sema->count)", sema->vki_sema_count);
+   /*PRE_FIELD_WRITE("lwp_sema_trywait(sema->count)", sema->vki_sema_count);*/
+   PRE_FIELD_READ("lwp_sema_trywait(sema->waiters)", sema->vki_sema_waiters);
+   /*PRE_FIELD_WRITE("lwp_sema_trywait(sema->waiters)",
+     sema->vki_sema_waiters);*/
+}
+
+POST(sys_lwp_sema_trywait)
+{
+   vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
+   POST_FIELD_WRITE(sema->vki_sema_count);
+   POST_FIELD_WRITE(sema->vki_sema_waiters);
+}
+
+PRE(sys_lwp_detach)
+{
+   /* int lwp_detach(id_t lwpid); */
+   PRINT("sys_lwp_detach ( %ld )", ARG1);
+   PRE_REG_READ1(long, "lwp_detach", vki_id_t, lwpid);
+}
+
+PRE(sys_fchroot)
+{
+   /* int fchroot(int fd); */
+   PRINT("sys_fchroot ( %ld )", ARG1);
+   PRE_REG_READ1(long, "fchroot", int, fd);
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "fchroot", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_gettimeofday)
+{
+   /* Kernel: int gettimeofday(struct timeval *tp); */
+   PRINT("sys_gettimeofday ( %#lx )", ARG1);
+   PRE_REG_READ1(long, "gettimeofday", struct timeval *, tp);
+   if (ARG1)
+      PRE_timeval_WRITE("gettimeofday(tp)", ARG1);
+}
+
+POST(sys_gettimeofday)
+{
+   if (ARG1)
+      POST_timeval_WRITE(ARG1);
+}
+
+PRE(sys_lwp_create)
+{
+   /* int lwp_create(ucontext_t *ucp, int flags, id_t *new_lwp) */
+
+   ThreadId ctid;
+   ThreadState *ptst;
+   ThreadState *ctst;
+   Addr stack;
+   SysRes res;
+   vki_ucontext_t uc;
+   Bool tool_informed = False;
+
+   PRINT("sys_lwp_create ( %#lx, %ld, %#lx )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "lwp_create", ucontext_t *, ucp, int, flags,
+                 id_t *, new_lwp);
+
+   if (ARG3 != 0)
+      PRE_MEM_WRITE("lwp_create(new_lwp)", ARG3, sizeof(vki_id_t));
+
+   /* If we can't deref ucontext_t then we can't do anything. */
+   if (!ML_(safe_to_deref)((void*)ARG1, sizeof(vki_ucontext_t))) {
+      SET_STATUS_Failure(VKI_EINVAL);
+      return;
+   }
+
+   ctid = VG_(alloc_ThreadState)();
+   ptst = VG_(get_ThreadState)(tid);
+   ctst = VG_(get_ThreadState)(ctid);
+
+   /* Stay sane. */
+   vg_assert(VG_(is_running_thread)(tid));
+   vg_assert(VG_(is_valid_tid)(ctid));
+
+   stack = ML_(allocstack)(ctid);
+   if (!stack) {
+      res = VG_(mk_SysRes_Error)(VKI_ENOMEM);
+      goto out;
+   }
+
+   /* First inherit parent's guest state */
+   ctst->arch.vex = ptst->arch.vex;
+   ctst->arch.vex_shadow1 = ptst->arch.vex_shadow1;
+   ctst->arch.vex_shadow2 = ptst->arch.vex_shadow2;
+
+   /* Set up some values. */
+   ctst->os_state.parent = tid;
+   ctst->os_state.threadgroup = ptst->os_state.threadgroup;
+   ctst->sig_mask = ptst->sig_mask;
+   ctst->tmp_sig_mask = ptst->sig_mask;
+
+   /* No stack definition should be present at this point.  The stack will be
+      set up later by libc via a setustack() call (the getsetcontext
+      syscall). */
+   ctst->client_stack_highest_byte = 0;
+   ctst->client_stack_szB = 0;
+   vg_assert(ctst->os_state.stk_id == (UWord)(-1));
+
+   /* Inform a tool that a new thread is created.  This has to be done before
+      any other core->tool event is sent. */
+   vg_assert(VG_(owns_BigLock_LL)(tid));
+   VG_TRACK(pre_thread_ll_create, tid, ctid);
+   tool_informed = True;
+
+#if defined(VGP_x86_solaris)
+   /* Set up the GDT (this has to be done before calling
+      VG_(restore_context)()). */
+   ML_(setup_gdt)(&ctst->arch.vex);
+#elif defined(VGP_amd64_solaris)
+   /* Nothing to do. */
+#else
+#  error "Unknown platform"
+#endif
+
+   /* Now set up the new thread according to ucontext_t. */
+   VG_(restore_context)(ctid, (vki_ucontext_t*)ARG1, Vg_CoreSysCall,
+                        True/*esp_is_thrptr*/);
+
+   /* Set up V thread (this also tells the kernel to block all signals in the
+      thread). */
+   ML_(setup_start_thread_context)(ctid, &uc);
+
+   /* Actually create the new thread. */
+   res = VG_(do_syscall3)(__NR_lwp_create, (UWord)&uc, ARG2, ARG3);
+
+   if (!sr_isError(res)) {
+      if (ARG3 != 0)
+         POST_MEM_WRITE(ARG3, sizeof(vki_id_t));
+      if (ARG2 & VKI_LWP_DAEMON)
+         ctst->os_state.daemon_thread = True;
+   }
+
+out:
+   if (sr_isError(res)) {
+      if (tool_informed) {
+         /* Tell a tool the thread exited in a hurry. */
+         VG_TRACK(pre_thread_ll_exit, ctid);
+      }
+
+      /* lwp_create failed. */
+      VG_(cleanup_thread)(&ctst->arch);
+      ctst->status = VgTs_Empty;
+   }
+
+   SET_STATUS_from_SysRes(res);
+}
+
+PRE(sys_lwp_exit)
+{
+   /* void syslwp_exit(); */
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   PRINT("sys_lwp_exit ( )");
+   PRE_REG_READ0(long, "lwp_exit");
+
+   /* Set the thread's status to be exiting, then claim that the syscall
+      succeeded. */
+   tst->exitreason = VgSrc_ExitThread;
+   tst->os_state.exitcode = 0;
+   SET_STATUS_Success(0);
+}
+
+PRE(sys_lwp_suspend)
+{
+   /* int lwp_suspend(id_t lwpid); */
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   PRINT("sys_lwp_suspend ( %ld )", ARG1);
+   PRE_REG_READ1(long, "lwp_suspend", vki_id_t, lwpid);
+
+   if (ARG1 == tst->os_state.lwpid) {
+      /* Set the SfMayBlock flag only if the currently running thread is the
+         one being suspended. If this flag were also used when suspending
+         other threads, a thread holding the_BigLock could end up suspended
+         and Valgrind would hang. */
+      *flags |= SfMayBlock;
+   }
+}
+
+PRE(sys_lwp_continue)
+{
+   /* int lwp_continue(id_t target_lwp); */
+   PRINT("sys_lwp_continue ( %ld )", ARG1);
+   PRE_REG_READ1(long, "lwp_continue", vki_id_t, target_lwp);
+}
+
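+/* Common PRE handling for the lwp_kill and lwp_sigqueue wrappers.  It checks
+   that the signal may be sent to the client, handles SIGKILL aimed at one of
+   our own threads specially, and requests the slow syscall route so the
+   target thread is tidied away properly. */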
+static void
+do_lwp_sigqueue(const HChar *syscall_name, UWord target_lwp, UWord signo,
+                SyscallStatus *status, UWord *flags)
+{
+   if (!ML_(client_signal_OK)(signo)) {
+      SET_STATUS_Failure(VKI_EINVAL);
+      return;
+   }
+
+   /* Check to see if this gave us a pending signal. */
+   *flags |= SfPollAfter;
+
+   if (VG_(clo_trace_signals))
+      VG_(message)(Vg_DebugMsg, "%s: sending signal %ld to thread %ld\n",
+                   syscall_name, signo, target_lwp);
+
+   /* If we're sending SIGKILL, check to see if the target is one of our
+      threads and handle it specially. */
+   if (signo == VKI_SIGKILL && ML_(do_sigkill)(target_lwp, -1)) {
+      SET_STATUS_Success(0);
+      return;
+   }
+
+   /* Ask to handle this syscall via the slow route, since that's the only one
+      that sets tst->status to VgTs_WaitSys.  If the result of doing the
+      syscall is an immediate run of async_signalhandler() in m_signals.c,
+      then we need the thread to be properly tidied away. */
+   *flags |= SfMayBlock;
+}
+
+#if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
+#if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL_TAKES_PID)
+PRE(sys_lwp_sigqueue)
+{
+   /* int lwp_sigqueue(pid_t target_pid, id_t target_lwp, int signal,
+                       void *value, int si_code, timespec_t *timeout);
+    */
+   PRINT("sys_lwp_sigqueue ( %ld, %ld, %ld, %#lx, %ld, %#lx )",
+         ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
+   PRE_REG_READ6(long, "lwp_sigqueue", vki_pid_t, target_pid,
+                 vki_id_t, target_lwp, int, signal, void *, value, int, si_code,
+                 vki_timespec_t *, timeout);
+
+   if (ARG6)
+      PRE_MEM_READ("lwp_sigqueue(timeout)", ARG6, sizeof(vki_timespec_t));
+
+   if ((ARG1 == 0) || (ARG1 == VG_(getpid)())) {
+      do_lwp_sigqueue("lwp_sigqueue", ARG2, ARG3, status, flags);
+   } else {
+      /* Signal is sent to a different process. */
+      if (VG_(clo_trace_signals))
+         VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sending signal %ld to "
+                      "process %ld, thread %ld\n", ARG3, ARG1, ARG2);
+     *flags |= SfMayBlock;
+   }
+}
+
+POST(sys_lwp_sigqueue)
+{
+   if (VG_(clo_trace_signals))
+      VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sent signal %ld to process %ld, "
+                   "thread %ld\n", ARG3, ARG1, ARG2);
+}
+
+#else
+
+PRE(sys_lwp_sigqueue)
+{
+   /* int lwp_sigqueue(id_t target_lwp, int signal, void *value,
+                       int si_code, timespec_t *timeout);
+    */
+   PRINT("sys_lwp_sigqueue ( %ld, %ld, %#lx, %ld, %#lx )",
+         ARG1, ARG2, ARG3, ARG4, ARG5);
+   PRE_REG_READ5(long, "lwp_sigqueue", vki_id_t, target_lwp, int, signal,
+                 void *, value, int, si_code, vki_timespec_t *, timeout);
+
+   if (ARG5)
+      PRE_MEM_READ("lwp_sigqueue(timeout)", ARG5, sizeof(vki_timespec_t));
+
+   do_lwp_sigqueue("lwp_sigqueue", ARG1, ARG2, status, flags);
+}
+
+POST(sys_lwp_sigqueue)
+{
+   if (VG_(clo_trace_signals))
+      VG_(message)(Vg_DebugMsg, "lwp_sigqueue: sent signal %ld to thread %ld\n",
+                   ARG2, ARG1);
+}
+
+#endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL_TAKES_PID */
+
+#else
+
+PRE(sys_lwp_kill)
+{
+   /* int lwp_kill(id_t target_lwp, int signal); */
+   PRINT("sys_lwp_kill ( %ld, %ld )", ARG1, ARG2);
+   PRE_REG_READ2(long, "lwp_kill", vki_id_t, target_lwp, int, signal);
+
+   do_lwp_sigqueue("lwp_kill", ARG1, ARG2, status, flags);
+}
+
+POST(sys_lwp_kill)
+{
+   if (VG_(clo_trace_signals))
+      VG_(message)(Vg_DebugMsg, "lwp_kill: sent signal %ld to thread %ld\n",
+                   ARG2, ARG1);
+}
+#endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
+
+PRE(sys_lwp_self)
+{
+   /* id_t lwp_self(void); */
+   PRINT("sys_lwp_self ( )");
+   PRE_REG_READ0(long, "lwp_self");
+}
+
+PRE(sys_lwp_sigmask)
+{
+   /* int64_t lwp_sigmask(int how, uint_t bits0, uint_t bits1, uint_t bits2,
+                          uint_t bits3); */
+   vki_sigset_t sigset;
+   PRINT("sys_lwp_sigmask ( %ld, %#lx, %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3,
+         ARG4, ARG5);
+   PRE_REG_READ5(long, "lwp_sigmask", int, how, vki_uint_t, bits0,
+                 vki_uint_t, bits1, vki_uint_t, bits2, vki_uint_t, bits3);
+
+   sigset.__sigbits[0] = ARG2;
+   sigset.__sigbits[1] = ARG3;
+   sigset.__sigbits[2] = ARG4;
+   sigset.__sigbits[3] = ARG5;
+
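+   /* Route the change through Valgrind's own signal-mask machinery
+      (VG_(do_sys_sigprocmask)) rather than issuing the raw syscall, so the
+      core's record of the thread's mask stays in sync with the client's. */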
+   SET_STATUS_from_SysRes(
+      VG_(do_sys_sigprocmask)(tid, ARG1 /*how*/, &sigset, NULL)
+   );
+
+   if (SUCCESS)
+      *flags |= SfPollAfter;
+}
+
+PRE(sys_lwp_private)
+{
+   /* int lwp_private(int cmd, int which, uintptr_t base); */
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   Int supported_base, supported_sel;
+   PRINT("sys_lwp_private ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "lwp_private", int, cmd, int, which,
+                 uintptr_t, base);
+
+   /* Note: Only the %gs base is currently supported on x86 and the %fs base
+      on amd64.  Support for the %fs base on x86 and for the %gs base on amd64
+      should be added.  Anything else is probably a client program error. */
+#if defined(VGP_x86_solaris)
+   supported_base = VKI_LWP_GSBASE;
+   supported_sel = VKI_LWPGS_SEL;
+#elif defined(VGP_amd64_solaris)
+   supported_base = VKI_LWP_FSBASE;
+   supported_sel = 0;
+#else
+#error "Unknown platform"
+#endif
+   if (ARG2 != supported_base) {
+      VG_(unimplemented)("Syswrap of the lwp_private call where which=%ld.",
+                         ARG2);
+      /*NOTREACHED*/
+   }
+
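+   /* Both supported commands are emulated entirely in this wrapper; the
+      kernel lwp_private syscall is never issued. */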
+   switch (ARG1 /*cmd*/) {
+   case VKI_LWP_SETPRIVATE:
+#if defined(VGP_x86_solaris)
+      tst->os_state.thrptr = ARG3;
+      ML_(update_gdt_lwpgs)(tid);
+#elif defined(VGP_amd64_solaris)
+      tst->arch.vex.guest_FS_CONST = ARG3;
+#else
+#error "Unknown platform"
+#endif
+      SET_STATUS_Success(supported_sel);
+      break;
+   case VKI_LWP_GETPRIVATE:
+      {
+         int thrptr;
+#if defined(VGP_x86_solaris)
+         thrptr = tst->os_state.thrptr;
+#elif defined(VGP_amd64_solaris)
+         thrptr = tst->arch.vex.guest_FS_CONST;
+#else
+#error "Unknown platform"
+#endif
+
+         if (thrptr == 0) {
+            SET_STATUS_Failure(VKI_EINVAL);
+            return;
+         }
+
+#if defined(VGP_x86_solaris)
+         if (tst->arch.vex.guest_GS != supported_sel) {
+            SET_STATUS_Failure(VKI_EINVAL);
+            return;
+         }
+#elif defined(VGP_amd64_solaris)
+         /* Valgrind on amd64 does not allow the %gs register to be changed,
+            so there is no need here to check that guest_GS equals
+            supported_sel. */
+#else
+#error "Unknown platform"
+#endif
+
+         PRE_MEM_WRITE("lwp_private(base)", ARG3, sizeof(Addr));
+         if (!ML_(safe_to_deref((void*)ARG3, sizeof(Addr)))) {
+            SET_STATUS_Failure(VKI_EFAULT);
+            return;
+         }
+         *(Addr*)ARG3 = thrptr;
+         POST_MEM_WRITE((Addr)ARG3, sizeof(Addr));
+         SET_STATUS_Success(0);
+         break;
+      }
+   default:
+      VG_(unimplemented)("Syswrap of the lwp_private call where cmd=%ld.",
+                         ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+PRE(sys_lwp_wait)
+{
+   /* int lwp_wait(id_t lwpid, id_t *departed); */
+   *flags |= SfMayBlock;
+   PRINT("sys_lwp_wait ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "lwp_wait", vki_id_t, lwpid, id_t *, departed);
+   if (ARG2)
+      PRE_MEM_WRITE("lwp_wait(departed)", ARG2, sizeof(vki_id_t));
+}
+
+POST(sys_lwp_wait)
+{
+   if (ARG2)
+      POST_MEM_WRITE(ARG2, sizeof(vki_id_t));
+}
+
+PRE(sys_lwp_mutex_wakeup)
+{
+   /* int lwp_mutex_wakeup(lwp_mutex_t *lp, int release_all); */
+   vki_lwp_mutex_t *lp = (vki_lwp_mutex_t*)ARG1;
+   *flags |= SfMayBlock;
+   PRINT("sys_lwp_mutex_wakeup ( %#lx, %ld )", ARG1, ARG2);
+   PRE_REG_READ2(long, "lwp_mutex_wakeup", lwp_mutex_t *, lp,
+                 int, release_all);
+   PRE_FIELD_READ("lwp_mutex_wakeup(lp->mutex_type)", lp->vki_mutex_type);
+   PRE_FIELD_WRITE("lwp_mutex_wakeup(lp->mutex_waiters)",
+                   lp->vki_mutex_waiters);
+}
+
+POST(sys_lwp_mutex_wakeup)
+{
+   vki_lwp_mutex_t *lp = (vki_lwp_mutex_t*)ARG1;
+   POST_FIELD_WRITE(lp->vki_mutex_waiters);
+}
+
+PRE(sys_lwp_cond_broadcast)
+{
+   /* int lwp_cond_broadcast(lwp_cond_t *cvp); */
+   vki_lwp_cond_t *cvp = (vki_lwp_cond_t*)ARG1;
+   *flags |= SfMayBlock;
+   PRINT("sys_lwp_cond_broadcast ( %#lx )", ARG1);
+   PRE_REG_READ1(long, "lwp_cond_broadcast", lwp_cond_t *, cvp);
+
+   PRE_FIELD_READ("lwp_cond_broadcast(cvp->type)", cvp->vki_cond_type);
+   PRE_FIELD_READ("lwp_cond_broadcast(cvp->waiters_kernel)",
+                  cvp->vki_cond_waiters_kernel);
+   /*PRE_FIELD_WRITE("lwp_cond_broadcast(cvp->waiters_kernel)",
+                     cvp->vki_cond_waiters_kernel);*/
+}
+
+POST(sys_lwp_cond_broadcast)
+{
+   vki_lwp_cond_t *cvp = (vki_lwp_cond_t*)ARG1;
+   POST_FIELD_WRITE(cvp->vki_cond_waiters_kernel);
+}
+
+PRE(sys_pread)
+{
+   /* ssize_t pread(int fildes, void *buf, size_t nbyte, off_t offset); */
+   *flags |= SfMayBlock;
+   PRINT("sys_pread ( %ld, %#lx, %lu, %ld )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "pread", int, fildes, void *, buf,
+                 vki_size_t, nbyte, vki_off_t, offset);
+   PRE_MEM_WRITE("pread(buf)", ARG2, ARG3);
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "pread", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_pread)
+{
+   POST_MEM_WRITE(ARG2, RES);
+}
+
+PRE(sys_pwrite)
+{
+   /* ssize_t pwrite(int fildes, const void *buf, size_t nbyte,
+                     off_t offset); */
+   *flags |= SfMayBlock;
+   PRINT("sys_pwrite ( %ld, %#lx, %lu, %ld )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "pwrite", int, fildes, const void *, buf,
+                 vki_size_t, nbyte, vki_off_t, offset);
+   PRE_MEM_READ("pwrite(buf)", ARG2, ARG3);
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "pwrite", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_getpagesizes)
+{
+   /* int getpagesizes(int legacy, size_t *buf, int nelem); */
+   PRINT("sys_getpagesizes ( %ld, %#lx, %ld )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "getpagesizes", int, legacy, size_t *, buf,
+                 int, nelem);
+   if (ARG2)
+      PRE_MEM_WRITE("getpagesizes(buf)", ARG2, ARG3 * sizeof(vki_size_t));
+}
+
+POST(sys_getpagesizes)
+{
+   if (ARG2)
+      POST_MEM_WRITE(ARG2, RES * sizeof(vki_size_t));
+}
+
+PRE(sys_rusagesys)
+{
+   /* Kernel: int rusagesys(int code, void *arg1, void *arg2,
+                            void *arg3, void *arg4); */
+   switch (ARG1 /*code*/) {
+   case VKI__RUSAGESYS_GETRUSAGE:
+   case VKI__RUSAGESYS_GETRUSAGE_CHLD:
+   case VKI__RUSAGESYS_GETRUSAGE_LWP:
+      /* Libc: int getrusage(int who, struct rusage *r_usage); */
+      PRINT("sys_rusagesys ( %ld, %#lx )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("rusagesys", "getrusage"), int, code,
+                    struct vki_rusage *, r_usage);
+      PRE_MEM_WRITE("rusagesys(r_usage)", ARG2, sizeof(struct vki_rusage));
+      break;
+
+   case VKI__RUSAGESYS_GETVMUSAGE:
+      /* Libc: int getvmusage(uint_t flags, time_t age,
+                              vmusage_t *buf, size_t *nres); */
+      PRINT("sys_rusagesys ( %ld, %ld, %ld, %#lx, %#lx )",
+            ARG1, ARG2, ARG3, ARG4, ARG5);
+      PRE_REG_READ5(long, SC2("rusagesys", "getvmusage"), int, code,
+                    vki_uint_t, flags, vki_time_t, age,
+                    vki_vmusage_t *, buf, vki_size_t *, nres);
+      PRE_MEM_READ("rusagesys(nres)", ARG5, sizeof(vki_size_t));
+      /* PRE_MEM_WRITE("rusagesys(nres)", ARG5, sizeof(vki_size_t)); */
+
+      if (ML_(safe_to_deref)((void *) ARG5, sizeof(vki_size_t))) {
+         vki_size_t *nres = (vki_size_t *) ARG5;
+         PRE_MEM_WRITE("rusagesys(buf)", ARG4,
+                       *nres * sizeof(vki_vmusage_t));
+      }
+      *flags |= SfMayBlock;
+      break;
+
+   default:
+      VG_(unimplemented)("Syswrap of the rusagesys call with code %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_rusagesys)
+{
+   switch (ARG1 /*code*/) {
+   case VKI__RUSAGESYS_GETRUSAGE:
+   case VKI__RUSAGESYS_GETRUSAGE_CHLD:
+   case VKI__RUSAGESYS_GETRUSAGE_LWP:
+      POST_MEM_WRITE(ARG2, sizeof(struct vki_rusage));
+      break;
+   case VKI__RUSAGESYS_GETVMUSAGE:
+      {
+         vki_size_t *nres = (vki_size_t *) ARG5;
+         POST_MEM_WRITE(ARG5, sizeof(vki_size_t));
+         POST_MEM_WRITE(ARG4, *nres * sizeof(vki_vmusage_t));
+      }
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_port)
+{
+   /* Kernel: int64_t portfs(int opcode, uintptr_t a0, uintptr_t a1,
+                             uintptr_t a2, uintptr_t a3, uintptr_t a4); */
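+   /* The opcode lives in the low bits of the first argument; any extra flag
+      bits above VKI_PORT_CODE_MASK are masked off here. */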
+   Int opcode = ARG1 & VKI_PORT_CODE_MASK;
+   *flags |= SfMayBlock;
+   switch (opcode) {
+   case VKI_PORT_CREATE:
+      PRINT("sys_port ( %ld )", ARG1);
+      PRE_REG_READ1(long, SC2("port", "create"), int, opcode);
+      break;
+   case VKI_PORT_ASSOCIATE:
+   case VKI_PORT_DISSOCIATE:
+      PRINT("sys_port ( %ld, %ld, %ld, %#lx, %ld, %#lx )", ARG1, ARG2, ARG3,
+            ARG4, ARG5, ARG6);
+      if (opcode == VKI_PORT_ASSOCIATE) {
+         PRE_REG_READ6(long, SC2("port", "associate"), int, opcode, int, a0,
+                       int, a1, uintptr_t, a2, int, a3, void *, a4);
+      }
+      else {
+         PRE_REG_READ6(long, SC2("port", "dissociate"), int, opcode, int, a0,
+                       int, a1, uintptr_t, a2, int, a3, void *, a4);
+      }
+
+      switch (ARG3 /*source*/) {
+      case VKI_PORT_SOURCE_FD:
+         if (!ML_(fd_allowed)(ARG4, "port", tid, False)) {
+            SET_STATUS_Failure(VKI_EBADF);
+         }
+         break;
+      case VKI_PORT_SOURCE_FILE:
+         {
+            struct vki_file_obj *fo = (struct vki_file_obj *)ARG4;
+            PRE_MEM_READ("port(file_obj)", ARG4, sizeof(struct vki_file_obj));
+            if (ML_(safe_to_deref)(&fo->fo_name, sizeof(fo->fo_name)))
+               PRE_MEM_RASCIIZ("port(file_obj->fo_name)", (Addr)fo->fo_name);
+         }
+         break;
+      default:
+         VG_(unimplemented)("Syswrap of the port_associate/dissociate call "
+                            "type %ld.", ARG3);
+         /*NOTREACHED*/
+         break;
+      }
+      break;
+   case VKI_PORT_SEND:
+      PRINT("sys_port ( %ld, %ld, %ld, %#lx )", ARG1, ARG2, ARG3, ARG4);
+      PRE_REG_READ4(long, SC2("port", "send"), int, opcode, int, a0, int, a1,
+                    void *, a2);
+      break;
+   case VKI_PORT_SENDN:
+      PRINT("sys_port ( %ld, %#lx, %#lx, %lu, %lx, %#lx)", ARG1, ARG2, ARG3,
+            ARG4, ARG5, ARG6);
+      PRE_REG_READ6(long, SC2("port", "sendn"), int, opcode, int *, a0,
+                    int *, a1, vki_uint_t, a2, int, a3, void *, a4);
+      PRE_MEM_READ("port(ports)", ARG2, ARG4 * sizeof(int));
+      PRE_MEM_WRITE("port(errors)", ARG3, ARG4 * sizeof(int));
+      break;
+   case VKI_PORT_GET:
+      PRINT("sys_port ( %ld, %ld, %#lx, %ld, %ld, %#lx )", ARG1, ARG2, ARG3,
+            ARG4, ARG5, ARG6);
+      PRE_REG_READ6(long, SC2("port", "get"), int, opcode, int, a0,
+                    port_event_t *, a1, vki_time_t, a2, long, a3,
+                    timespec_t *, a4);
+      PRE_MEM_WRITE("port(uevp)", ARG3, sizeof(vki_port_event_t));
+      break;
+   case VKI_PORT_GETN:
+      PRINT("sys_port ( %ld, %ld, %#lx, %lu, %lu, %#lx )", ARG1, ARG2, ARG3,
+            ARG4, ARG5, ARG6);
+      PRE_REG_READ6(long, SC2("port", "getn"), int, opcode, int, a0,
+                    port_event_t *, a1, vki_uint_t, a2, vki_uint_t, a3,
+                    timespec_t *, a4);
+      if (ARG6)
+         PRE_MEM_READ("port(timeout)", ARG6, sizeof(vki_timespec_t));
+      PRE_MEM_WRITE("port(uevp)", ARG3, ARG4 * sizeof(vki_port_event_t));
+      break;
+   case VKI_PORT_ALERT:
+      PRINT("sys_port ( %ld, %ld, %ld, %ld, %#lx )", ARG1, ARG2, ARG3, ARG4,
+            ARG5);
+      PRE_REG_READ5(long, SC2("port", "alert"), int, opcode, int, a0, int, a1,
+                    int, a2, void *, a3);
+      break;
+   case VKI_PORT_DISPATCH:
+      PRINT("sys_port ( %ld, %ld, %ld, %ld, %#lx, %#lx )", ARG2, ARG1, ARG3,
+            ARG4, ARG5, ARG6);
+      PRE_REG_READ6(long, SC2("port", "dispatch"), int, opcode, int, a0,
+                    int, a1, int, a2, uintptr_t, a3, void *, a4);
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the port call with opcode %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+
+   /* Be strict. */
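+   /* PORT_CREATE takes no descriptor and PORT_SENDN takes a list of
+      descriptors in ARG2, so ARG2 is checked as a descriptor only for the
+      remaining opcodes. */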
+   if ((opcode != VKI_PORT_CREATE && opcode != VKI_PORT_SENDN) &&
+       !ML_(fd_allowed)(ARG2, "port", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_port)
+{
+   Int opcode = ARG1 & VKI_PORT_CODE_MASK;
+   switch (opcode) {
+   case VKI_PORT_CREATE:
+      if (!ML_(fd_allowed)(RES, "port", tid, True)) {
+         VG_(close)(RES);
+         SET_STATUS_Failure(VKI_EMFILE);
+      }
+      else if (VG_(clo_track_fds))
+         ML_(record_fd_open_named)(tid, RES);
+      break;
+   case VKI_PORT_ASSOCIATE:
+   case VKI_PORT_DISSOCIATE:
+   case VKI_PORT_SEND:
+      break;
+   case VKI_PORT_SENDN:
+      if (RES != ARG4) {
+         /* If there is any error then the whole errors area is written. */
+         POST_MEM_WRITE(ARG3, ARG4 * sizeof(int));
+      }
+      break;
+   case VKI_PORT_GET:
+      POST_MEM_WRITE(ARG3, sizeof(vki_port_event_t));
+      break;
+   case VKI_PORT_GETN:
+      POST_MEM_WRITE(ARG3, RES * sizeof(vki_port_event_t));
+      break;
+   case VKI_PORT_ALERT:
+   case VKI_PORT_DISPATCH:
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the port call with opcode %ld.", ARG2);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+PRE(sys_pollsys)
+{
+   /* int pollsys(pollfd_t *fds, nfds_t nfds, timespec_t *timeout,
+                  sigset_t *set); */
+   UWord i;
+   struct vki_pollfd *ufds = (struct vki_pollfd *)ARG1;
+
+   *flags |= SfMayBlock;
+
+   PRINT("sys_pollsys ( %#lx, %lu, %#lx, %#lx )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "poll", pollfd_t *, fds, vki_nfds_t, nfds,
+                 timespec_t *, timeout, sigset_t *, set);
+
+   for (i = 0; i < ARG2; i++) {
+      vki_pollfd_t *u = &ufds[i];
+      PRE_FIELD_READ("poll(ufds.fd)", u->fd);
+      /* XXX Check if it's valid? */
+      PRE_FIELD_READ("poll(ufds.events)", u->events);
+      PRE_FIELD_WRITE("poll(ufds.revents)", u->revents);
+   }
+
+   if (ARG3)
+      PRE_MEM_READ("poll(timeout)", ARG3, sizeof(vki_timespec_t));
+   if (ARG4)
+      PRE_MEM_READ("poll(set)", ARG4, sizeof(vki_sigset_t));
+}
+
+POST(sys_pollsys)
+{
+   if (RES >= 0) {
+      UWord i;
+      vki_pollfd_t *ufds = (vki_pollfd_t*)ARG1;
+      for (i = 0; i < ARG2; i++)
+         POST_FIELD_WRITE(ufds[i].revents);
+   }
+}
+
+PRE(sys_labelsys)
+{
+   /* Kernel: int labelsys(int op, void *a1, void *a2, void *a3,
+                           void *a4, void *a5); */
+
+   switch (ARG1 /*op*/) {
+   case VKI_TSOL_SYSLABELING:
+      /* Libc: int is_system_labeled(void); */
+      PRINT("sys_labelsys ( %ld )", ARG1);
+      PRE_REG_READ1(long, SC2("labelsys", "syslabeling"), int, op);
+      break;
+
+   case VKI_TSOL_TNRH:
+      /* Libtsnet: int tnrh(int cmd, tsol_rhent_t *buf); */
+      PRINT("sys_labelsys ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("labelsys", "tnrh"), int, op, int, cmd,
+                    vki_tsol_rhent_t *, buf);
+      if (ARG2 != VKI_TNDB_FLUSH)
+         PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_rhent_t));
+      break;
+
+   case VKI_TSOL_TNRHTP:
+      /* Libtsnet: int tnrhtp(int cmd, tsol_tpent_t *buf); */
+      PRINT("sys_labelsys ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("labelsys", "tnrhtp"), int, op, int, cmd,
+                    vki_tsol_tpent_t *, buf);
+      if (ARG2 != VKI_TNDB_FLUSH)
+         PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_tpent_t));
+      break;
+
+   case VKI_TSOL_TNMLP:
+      /* Libtsnet: int tnmlp(int cmd, tsol_mlpent_t *buf); */
+      PRINT("sys_labelsys ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("labelsys", "tnmlp"), int, op, int, cmd,
+                    vki_tsol_mlpent_t *, buf);
+      PRE_MEM_READ("labelsys(buf)", ARG3, sizeof(vki_tsol_mlpent_t));
+      break;
+
+   case VKI_TSOL_GETLABEL:
+      /* Libtsol: int getlabel(const char *path, bslabel_t *label); */
+      PRINT("sys_labelsys ( %ld, %#lx(%s), %#lx )",
+            ARG1, ARG2, (HChar *) ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("labelsys", "getlabel"), int, op,
+                    const char *, path, vki_bslabel_t *, label);
+      PRE_MEM_RASCIIZ("labelsys(path)", ARG2);
+      PRE_MEM_WRITE("labelsys(label)", ARG3, sizeof(vki_bslabel_t));
+      break;
+
+   case VKI_TSOL_FGETLABEL:
+      /* Libtsol: int fgetlabel(int fd, bslabel_t *label); */
+      PRINT("sys_labelsys ( %ld, %ld, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("labelsys", "fgetlabel"), int, op,
+                    int, fd, vki_bslabel_t *, label);
+      /* Be strict. */
+      if (!ML_(fd_allowed)(ARG2, "labelsys(fgetlabel)", tid, False))
+         SET_STATUS_Failure(VKI_EBADF);
+      PRE_MEM_WRITE("labelsys(label)", ARG3, sizeof(vki_bslabel_t));
+      break;
+
+#if defined(SOLARIS_TSOL_CLEARANCE)
+   case VKI_TSOL_GETCLEARANCE:
+      /* Libtsol: int getclearance(bslabel_t *clearance); */
+      PRINT("sys_labelsys ( %ld, %#lx )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("labelsys", "getclearance"), int, op,
+                    vki_bslabel_t *, clearance);
+      PRE_MEM_WRITE("labelsys(clearance)", ARG2, sizeof(vki_bslabel_t));
+      break;
+
+   case VKI_TSOL_SETCLEARANCE:
+      /* Libtsol: int setclearance(bslabel_t *clearance); */
+      PRINT("sys_labelsys ( %ld, %#lx )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("labelsys", "setclearance"), int, op,
+                    vki_bslabel_t *, clearance);
+      PRE_MEM_READ("labelsys(clearance)", ARG2, sizeof(vki_bslabel_t));
+      break;
+#endif /* SOLARIS_TSOL_CLEARANCE */
+
+   default:
+      VG_(unimplemented)("Syswrap of the labelsys call with op %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_labelsys)
+{
+   switch (ARG1 /*op*/) {
+   case VKI_TSOL_SYSLABELING:
+      break;
+
+   case VKI_TSOL_TNRH:
+      switch (ARG2 /*cmd*/) {
+      case VKI_TNDB_LOAD:
+      case VKI_TNDB_DELETE:
+      case VKI_TNDB_FLUSH:
+         break;
+#if defined(SOLARIS_TNDB_GET_TNIP)
+      case VKI_TNDB_GET_TNIP:
+#endif /* SOLARIS_TNDB_GET_TNIP */
+      case VKI_TNDB_GET:
+         POST_MEM_WRITE(ARG3, sizeof(vki_tsol_rhent_t));
+         break;
+      default:
+         vg_assert(0);
+         break;
+      }
+      break;
+
+   case VKI_TSOL_TNRHTP:
+      switch (ARG2 /*cmd*/) {
+      case VKI_TNDB_LOAD:
+      case VKI_TNDB_DELETE:
+      case VKI_TNDB_FLUSH:
+         break;
+      case VKI_TNDB_GET:
+         POST_MEM_WRITE(ARG3, sizeof(vki_tsol_tpent_t));
+         break;
+      default:
+         vg_assert(0);
+         break;
+      }
+      break;
+
+   case VKI_TSOL_TNMLP:
+      switch (ARG2 /*cmd*/) {
+      case VKI_TNDB_LOAD:
+      case VKI_TNDB_DELETE:
+      case VKI_TNDB_FLUSH:
+         break;
+      case VKI_TNDB_GET:
+         POST_MEM_WRITE(ARG3, sizeof(vki_tsol_mlpent_t));
+         break;
+      default:
+         vg_assert(0);
+         break;
+      }
+      break;
+
+   case VKI_TSOL_GETLABEL:
+   case VKI_TSOL_FGETLABEL:
+      POST_MEM_WRITE(ARG3, sizeof(vki_bslabel_t));
+      break;
+
+#if defined(SOLARIS_TSOL_CLEARANCE)
+   case VKI_TSOL_GETCLEARANCE:
+      POST_MEM_WRITE(ARG2, sizeof(vki_bslabel_t));
+      break;
+
+   case VKI_TSOL_SETCLEARANCE:
+      break;
+#endif /* SOLARIS_TSOL_CLEARANCE */
+
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_acl)
+{
+   /* int acl(char *pathp, int cmd, int nentries, void *aclbufp); */
+   PRINT("sys_acl ( %#lx(%s), %ld, %ld, %#lx )", ARG1, (HChar *) ARG1, ARG2,
+         ARG3, ARG4);
+
+   PRE_REG_READ4(long, "acl", char *, pathp, int, cmd,
+                 int, nentries, void *, aclbufp);
+   PRE_MEM_RASCIIZ("acl(pathp)", ARG1);
+
+   switch (ARG2 /*cmd*/) {
+   case VKI_SETACL:
+      if (ARG4)
+         PRE_MEM_READ("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
+      break;
+   case VKI_GETACL:
+      PRE_MEM_WRITE("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
+      break;
+   case VKI_GETACLCNT:
+      break;
+   case VKI_ACE_SETACL:
+      if (ARG4)
+         PRE_MEM_READ("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
+      break;
+   case VKI_ACE_GETACL:
+      PRE_MEM_WRITE("acl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
+      break;
+   case VKI_ACE_GETACLCNT:
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the acl call with cmd %ld.", ARG2);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_acl)
+{
+   switch (ARG2 /*cmd*/) {
+   case VKI_SETACL:
+      break;
+   case VKI_GETACL:
+      POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_aclent_t));
+      break;
+   case VKI_GETACLCNT:
+      break;
+   case VKI_ACE_SETACL:
+      break;
+   case VKI_ACE_GETACL:
+      POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_ace_t));
+      break;
+   case VKI_ACE_GETACLCNT:
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_auditsys)
+{
+   /* Kernel: int auditsys(long code, long a1, long a2, long a3, long a4); */
+   switch (ARG1 /*code*/) {
+   case VKI_BSM_GETAUID:
+      /* Libbsm: int getauid(au_id_t *auid); */
+      PRINT("sys_auditsys ( %ld, %#lx )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("auditsys", "getauid"), long, code,
+                    vki_au_id_t *, auid);
+      PRE_MEM_WRITE("auditsys(auid)", ARG2, sizeof(vki_au_id_t));
+      break;
+   case VKI_BSM_SETAUID:
+      /* Libbsm: int setauid(au_id_t *auid); */
+      PRINT("sys_auditsys ( %ld, %#lx )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("auditsys", "setauid"), long, code,
+                    vki_au_id_t *, auid);
+      PRE_MEM_READ("auditsys(auid)", ARG2, sizeof(vki_au_id_t));
+      break;
+   case VKI_BSM_GETAUDIT:
+      /* Libbsm: int getaudit(auditinfo_t *ai); */
+      PRINT("sys_auditsys ( %ld, %#lx )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("auditsys", "getaudit"), long, code,
+                    vki_auditinfo_t *, ai);
+      PRE_MEM_WRITE("auditsys(ai)", ARG2, sizeof(vki_auditinfo_t));
+      break;
+   case VKI_BSM_SETAUDIT:
+      /* Libbsm: int setaudit(auditinfo_t *ai); */
+      PRINT("sys_auditsys ( %ld, %#lx )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("auditsys", "setaudit"), long, code,
+                    vki_auditinfo_t *, ai);
+      PRE_MEM_READ("auditsys(ai)", ARG2, sizeof(vki_auditinfo_t));
+      break;
+   case VKI_BSM_AUDIT:
+      /* Libbsm: int audit(void *record, int length); */
+      PRINT("sys_auditsys ( %ld, %#lx, %ld )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("auditsys", "audit"), long, code,
+                    void *, record, int, length);
+      PRE_MEM_READ("auditsys(record)", ARG2, ARG3);
+      break;
+   case VKI_BSM_AUDITCTL:
+      /* Libbsm: int auditon(int cmd, caddr_t data, int length); */
+      PRINT("sys_auditsys ( %ld, %ld, %#lx, %ld )",
+            ARG1, ARG2, ARG3, ARG4);
+
+      switch (ARG2 /*cmd*/) {
+      case VKI_A_GETPOLICY:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getpolicy"),
+                       long, code, int, cmd, vki_uint32_t *, policy);
+         PRE_MEM_WRITE("auditsys(policy)", ARG3, sizeof(vki_uint32_t));
+         break;
+      case VKI_A_SETPOLICY:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setpolicy"),
+                       long, code, int, cmd, vki_uint32_t *, policy);
+         PRE_MEM_READ("auditsys(policy)", ARG3, sizeof(vki_uint32_t));
+         break;
+      case VKI_A_GETKMASK:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getkmask"),
+                       long, code, int, cmd, vki_au_mask_t *, kmask);
+         PRE_MEM_WRITE("auditsys(kmask)", ARG3, sizeof(vki_au_mask_t));
+         break;
+      case VKI_A_SETKMASK:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setkmask"),
+                       long, code, int, cmd, vki_au_mask_t *, kmask);
+         PRE_MEM_READ("auditsys(kmask)", ARG3, sizeof(vki_au_mask_t));
+         break;
+      case VKI_A_GETQCTRL:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getqctrl"),
+                       long, code, int, cmd,
+                       struct vki_au_qctrl *, qctrl);
+         PRE_MEM_WRITE("auditsys(qctrl)", ARG3,
+                       sizeof(struct vki_au_qctrl));
+         break;
+      case VKI_A_SETQCTRL:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setqctrl"),
+                       long, code, int, cmd,
+                       struct vki_au_qctrl *, qctrl);
+         PRE_MEM_READ("auditsys(qctrl)", ARG3,
+                      sizeof(struct vki_au_qctrl));
+         break;
+      case VKI_A_GETCWD:
+         PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getcwd"),
+                       long, code, int, cmd, char *, data, int, length);
+         PRE_MEM_WRITE("auditsys(data)", ARG3, ARG4);
+         break;
+      case VKI_A_GETCAR:
+         PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getcar"),
+                       long, code, int, cmd, char *, data, int, length);
+         PRE_MEM_WRITE("auditsys(data)", ARG3, ARG4);
+         break;
+      case VKI_A_GETSTAT:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getstat"),
+                       long, code, int, cmd, vki_au_stat_t *, stats);
+         PRE_MEM_WRITE("auditsys(stats)", ARG3, sizeof(vki_au_stat_t));
+         break;
+      case VKI_A_SETSTAT:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setstat"),
+                       long, code, int, cmd, vki_au_stat_t *, stats);
+         PRE_MEM_READ("auditsys(stats)", ARG3, sizeof(vki_au_stat_t));
+         break;
+      case VKI_A_SETUMASK:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setumask"),
+                       long, code, int, cmd, vki_auditinfo_t *, umask);
+         PRE_MEM_READ("auditsys(umask)", ARG3, sizeof(vki_auditinfo_t));
+         break;
+      case VKI_A_SETSMASK:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setsmask"),
+                       long, code, int, cmd, vki_auditinfo_t *, smask);
+         PRE_MEM_READ("auditsys(smask)", ARG3, sizeof(vki_auditinfo_t));
+         break;
+      case VKI_A_GETCOND:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getcond"),
+                       long, code, int, cmd, int *, cond);
+         PRE_MEM_WRITE("auditsys(cond)", ARG3, sizeof(int));
+         break;
+      case VKI_A_SETCOND:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setcond"),
+                       long, code, int, cmd, int *, state);
+         PRE_MEM_READ("auditsys(cond)", ARG3, sizeof(int));
+         break;
+      case VKI_A_GETCLASS:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getclass"),
+                       long, code, int, cmd,
+                       vki_au_evclass_map_t *, classmap);
+
+         if (ML_(safe_to_deref((void *) ARG3,
+                               sizeof(vki_au_evclass_map_t)))) {
+            vki_au_evclass_map_t *classmap =
+               (vki_au_evclass_map_t *) ARG3;
+            PRE_FIELD_READ("auditsys(classmap.ec_number)",
+                           classmap->ec_number);
+            PRE_MEM_WRITE("auditsys(classmap)", ARG3,
+                          sizeof(vki_au_evclass_map_t));
+         }
+         break;
+      case VKI_A_SETCLASS:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setclass"),
+                       long, code, int, cmd,
+                       vki_au_evclass_map_t *, classmap);
+
+         if (ML_(safe_to_deref((void *) ARG3,
+                               sizeof(vki_au_evclass_map_t)))) {
+            vki_au_evclass_map_t *classmap =
+               (vki_au_evclass_map_t *) ARG3;
+            PRE_FIELD_READ("auditsys(classmap.ec_number)",
+                           classmap->ec_number);
+            PRE_FIELD_READ("auditsys(classmap.ec_class)",
+                           classmap->ec_class);
+         }
+         break;
+      case VKI_A_GETPINFO:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getpinfo"),
+                       long, code, int, cmd,
+                       struct vki_auditpinfo *, apinfo);
+
+         if (ML_(safe_to_deref((void *) ARG3,
+                               sizeof(struct vki_auditpinfo)))) {
+            struct vki_auditpinfo *apinfo =
+               (struct vki_auditpinfo *) ARG3;
+            PRE_FIELD_READ("auditsys(apinfo.ap_pid)", apinfo->ap_pid);
+            PRE_MEM_WRITE("auditsys(apinfo)", ARG3,
+                          sizeof(struct vki_auditpinfo));
+         }
+         break;
+      case VKI_A_SETPMASK:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setpmask"),
+                       long, code, int, cmd,
+                       struct vki_auditpinfo *, apinfo);
+         PRE_MEM_WRITE("auditsys(apinfo)", ARG3,
+                       sizeof(struct vki_auditpinfo));
+         break;
+      case VKI_A_GETPINFO_ADDR:
+         PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getpinfo_addr"),
+                       long, code, int, cmd,
+                       struct vki_auditpinfo_addr *, apinfo, int, length);
+
+         if (ML_(safe_to_deref((void *) ARG3,
+                               sizeof(struct vki_auditpinfo_addr)))) {
+            struct vki_auditpinfo_addr *apinfo_addr =
+               (struct vki_auditpinfo_addr *) ARG3;
+            PRE_FIELD_READ("auditsys(apinfo_addr.ap_pid)",
+                           apinfo_addr->ap_pid);
+            PRE_MEM_WRITE("auditsys(apinfo_addr)", ARG3, ARG4);
+         }
+         break;
+      case VKI_A_GETKAUDIT:
+         PRE_REG_READ4(long, SC3("auditsys", "auditctl", "getkaudit"),
+                       long, code, int, cmd,
+                       vki_auditinfo_addr_t *, kaudit, int, length);
+         PRE_MEM_WRITE("auditsys(kaudit)", ARG3, ARG4);
+         break;
+      case VKI_A_SETKAUDIT:
+         PRE_REG_READ4(long, SC3("auditsys", "auditctl", "setkaudit"),
+                       long, code, int, cmd,
+                       vki_auditinfo_addr_t *, kaudit, int, length);
+         PRE_MEM_READ("auditsys(kaudit)", ARG3, ARG4);
+         break;
+      case VKI_A_GETAMASK:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "getamask"),
+                       long, code, int, cmd, vki_au_mask_t *, amask);
+         PRE_MEM_WRITE("auditsys(amask)", ARG3, sizeof(vki_au_mask_t));
+         break;
+      case VKI_A_SETAMASK:
+         PRE_REG_READ3(long, SC3("auditsys", "auditctl", "setamask"),
+                       long, code, int, cmd, vki_au_mask_t *, amask);
+         PRE_MEM_READ("auditsys(amask)", ARG3, sizeof(vki_au_mask_t));
+         break;
+      default:
+         VG_(unimplemented)("Syswrap of the auditsys(auditctl) call "
+                            "with cmd %ld.", ARG2);
+         /*NOTREACHED*/
+         break;
+      }
+      break;
+   case VKI_BSM_GETAUDIT_ADDR:
+      /* Libbsm: int getaudit_addr(auditinfo_addr_t *ai, int len); */
+      PRINT("sys_auditsys ( %ld, %#lx, %ld )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("auditsys", "getaudit_addr"), long, code,
+                    vki_auditinfo_addr_t *, ai, int, len);
+      PRE_MEM_WRITE("auditsys(ai)", ARG2, ARG3);
+      break;
+   case VKI_BSM_SETAUDIT_ADDR:
+      /* Libbsm: int setaudit_addr(auditinfo_addr_t *ai, int len); */
+      PRINT("sys_auditsys ( %ld, %#lx, %ld )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("auditsys", "setaudit_addr"), long, code,
+                    vki_auditinfo_addr_t *, ai, int, len);
+      PRE_MEM_READ("auditsys(ai)", ARG2, ARG3);
+      break;
+   case VKI_BSM_AUDITDOOR:
+      /* Libbsm: int auditdoor(int fd); */
+      PRINT("sys_auditsys ( %ld, %ld )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("auditsys", "door"), long, code, int, fd);
+
+      /* Be strict. */
+      if (!ML_(fd_allowed)(ARG2, SC2("auditsys", "door")"(fd)",
+                           tid, False))
+         SET_STATUS_Failure(VKI_EBADF);
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the auditsys call with code %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_auditsys)
+{
+   switch (ARG1 /*code*/) {
+   case VKI_BSM_GETAUID:
+      POST_MEM_WRITE(ARG2, sizeof(vki_au_id_t));
+      break;
+   case VKI_BSM_SETAUID:
+      break;
+   case VKI_BSM_GETAUDIT:
+      POST_MEM_WRITE(ARG2, sizeof(vki_auditinfo_t));
+      break;
+   case VKI_BSM_SETAUDIT:
+   case VKI_BSM_AUDIT:
+      break;
+   case VKI_BSM_AUDITCTL:
+      switch (ARG2 /*cmd*/) {
+         case VKI_A_GETPOLICY:
+            POST_MEM_WRITE(ARG3, sizeof(vki_uint32_t));
+            break;
+         case VKI_A_SETPOLICY:
+            break;
+         case VKI_A_GETKMASK:
+            POST_MEM_WRITE(ARG3, sizeof(vki_au_mask_t));
+            break;
+         case VKI_A_SETKMASK:
+            break;
+         case VKI_A_GETQCTRL:
+            POST_MEM_WRITE(ARG3, sizeof(struct vki_au_qctrl));
+            break;
+         case VKI_A_SETQCTRL:
+            break;
+         case VKI_A_GETCWD:
+         case VKI_A_GETCAR:
+            POST_MEM_WRITE(ARG3, VG_(strlen)((HChar *) ARG3) + 1);
+            break;
+         case VKI_A_GETSTAT:
+            POST_MEM_WRITE(ARG3, sizeof(vki_au_stat_t));
+            break;
+         case VKI_A_SETSTAT:
+         case VKI_A_SETUMASK:
+         case VKI_A_SETSMASK:
+            break;
+         case VKI_A_GETCOND:
+            POST_MEM_WRITE(ARG3, sizeof(int));
+            break;
+         case VKI_A_SETCOND:
+            break;
+         case VKI_A_GETCLASS:
+            POST_MEM_WRITE(ARG3, sizeof(vki_au_evclass_map_t));
+            break;
+         case VKI_A_SETCLASS:
+            break;
+         case VKI_A_GETPINFO:
+            POST_MEM_WRITE(ARG3, sizeof(struct vki_auditpinfo));
+            break;
+         case VKI_A_SETPMASK:
+            break;
+         case VKI_A_GETPINFO_ADDR:
+            POST_MEM_WRITE(ARG3, sizeof(struct vki_auditpinfo_addr));
+            break;
+         case VKI_A_GETKAUDIT:
+            POST_MEM_WRITE(ARG3, sizeof(vki_auditinfo_addr_t));
+            break;
+         case VKI_A_SETKAUDIT:
+            break;
+         case VKI_A_GETAMASK:
+            POST_MEM_WRITE(ARG3, sizeof(vki_au_mask_t));
+            break;
+         case VKI_A_SETAMASK:
+            break;
+      }
+      break;
+   case VKI_BSM_GETAUDIT_ADDR:
+      POST_MEM_WRITE(ARG2, sizeof(vki_auditinfo_addr_t));
+      break;
+   case VKI_BSM_SETAUDIT_ADDR:
+      break;
+   case VKI_BSM_AUDITDOOR:
+      break;
+   }
+}
+
+PRE(sys_p_online)
+{
+   /* int p_online(processorid_t processorid, int flag); */
+   PRINT("sys_p_online ( %ld, %ld )", ARG1, ARG2);
+   PRE_REG_READ2(long, "p_online", vki_processorid_t, processorid, int, flag);
+}
+
+PRE(sys_sigqueue)
+{
+   /* int sigqueue(pid_t pid, int signo, void *value,
+                   int si_code, timespec_t *timeout);
+    */
+   PRINT("sys_sigqueue ( %ld, %ld, %#lx, %ld, %#lx )",
+         ARG1, ARG2, ARG3, ARG4, ARG5);
+   PRE_REG_READ5(long, "sigqueue", vki_pid_t, pid, int, signo,
+                 void *, value, int, si_code,
+                 vki_timespec_t *, timeout);
+
+   if (ARG5)
+      PRE_MEM_READ("sigqueue(timeout)", ARG5, sizeof(vki_timespec_t));
+
+   if (!ML_(client_signal_OK)(ARG2)) {
+      SET_STATUS_Failure(VKI_EINVAL);
+      return;
+   }
+
+   /* If we're sending SIGKILL, check to see if the target is one of
+      our threads and handle it specially. */
+   if (ARG2 == VKI_SIGKILL && ML_(do_sigkill)(ARG1, -1)) {
+      SET_STATUS_Success(0);
+   } else {
+      SysRes res = VG_(do_syscall5)(SYSNO, ARG1, ARG2, ARG3, ARG4,
+                                    ARG5);
+      SET_STATUS_from_SysRes(res);
+   }
+
+   if (VG_(clo_trace_signals))
+      VG_(message)(Vg_DebugMsg,
+                   "sigqueue: signal %ld queued for pid %ld\n",
+                   ARG2, ARG1);
+
+   /* Check to see if this gave us a pending signal. */
+   *flags |= SfPollAfter;
+}
+
+PRE(sys_clock_gettime)
+{
+   /* int clock_gettime(clockid_t clock_id, struct timespec *tp); */
+   PRINT("sys_clock_gettime ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "clock_gettime", vki_clockid_t, clock_id,
+                 struct timespec *, tp);
+   PRE_MEM_WRITE("clock_gettime(tp)", ARG2, sizeof(struct vki_timespec));
+}
+
+POST(sys_clock_gettime)
+{
+   POST_MEM_WRITE(ARG2, sizeof(struct vki_timespec));
+}
+
+PRE(sys_clock_settime)
+{
+   /* int clock_settime(clockid_t clock_id, const struct timespec *tp); */
+   PRINT("sys_clock_settime ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "clock_settime", vki_clockid_t, clock_id,
+                 const struct timespec *, tp);
+   PRE_MEM_READ("clock_settime(tp)", ARG2, sizeof(struct vki_timespec));
+}
+
+PRE(sys_clock_getres)
+{
+   /* int clock_getres(clockid_t clock_id, struct timespec *res); */
+   PRINT("sys_clock_getres ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "clock_getres", vki_clockid_t, clock_id,
+                 struct timespec *, res);
+
+   if (ARG2)
+      PRE_MEM_WRITE("clock_getres(res)", ARG2, sizeof(struct vki_timespec));
+}
+
+POST(sys_clock_getres)
+{
+   if (ARG2)
+      POST_MEM_WRITE(ARG2, sizeof(struct vki_timespec));
+}
+
+PRE(sys_timer_create)
+{
+   /* int timer_create(clockid_t clock_id,
+                       struct sigevent *evp, timer_t *timerid);
+    */
+   PRINT("sys_timer_create ( %ld, %#lx, %#lx )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "timer_create", vki_clockid_t, clock_id,
+                 struct vki_sigevent *, evp, vki_timer_t *, timerid);
+
+   if (ARG2) {
+      struct vki_sigevent *evp = (struct vki_sigevent *) ARG2;
+      PRE_FIELD_READ("timer_create(evp.sigev_notify)", evp->sigev_notify);
+      PRE_FIELD_READ("timer_create(evp.sigev_signo)", evp->sigev_signo);
+      PRE_FIELD_READ("timer_create(evp.sigev_value.sival_int)",
+         evp->sigev_value.sival_int);
+
+      /* Be safe. */
+      if (ML_(safe_to_deref(evp, sizeof(struct vki_sigevent)))) {
+         if ((evp->sigev_notify == VKI_SIGEV_PORT) ||
+             (evp->sigev_notify == VKI_SIGEV_THREAD))
+            PRE_MEM_READ("timer_create(evp.sigev_value.sival_ptr)", 
+                         (Addr) evp->sigev_value.sival_ptr,
+                         sizeof(vki_port_notify_t));
+      }
+   }
+
+   PRE_MEM_WRITE("timer_create(timerid)", ARG3, sizeof(vki_timer_t));
+}
+
+POST(sys_timer_create)
+{
+   POST_MEM_WRITE(ARG3, sizeof(vki_timer_t));
+}
+
+PRE(sys_timer_delete)
+{
+   /* int timer_delete(timer_t timerid); */
+   PRINT("sys_timer_delete ( %ld )", ARG1);
+   PRE_REG_READ1(long, "timer_delete", vki_timer_t, timerid);
+}
+
+PRE(sys_timer_settime)
+{
+   /* int timer_settime(timer_t timerid, int flags,
+                        const struct itimerspec *value,
+                        struct itimerspec *ovalue);
+    */
+   PRINT("sys_timer_settime ( %ld, %ld, %#lx, %#lx )",
+         ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "timer_settime", vki_timer_t, timerid,
+                 int, flags, const struct vki_itimerspec *, value,
+                 struct vki_itimerspec *, ovalue);
+   PRE_MEM_READ("timer_settime(value)",
+                ARG3, sizeof(struct vki_itimerspec));
+   if (ARG4)
+      PRE_MEM_WRITE("timer_settime(ovalue)",
+                    ARG4, sizeof(struct vki_itimerspec));
+}
+
+POST(sys_timer_settime)
+{
+   if (ARG4)
+      POST_MEM_WRITE(ARG4, sizeof(struct vki_itimerspec));
+}
+
+PRE(sys_timer_gettime)
+{
+   /* int timer_gettime(timer_t timerid, struct itimerspec *value); */
+   PRINT("sys_timer_gettime ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "timer_gettime", vki_timer_t, timerid,
+                 struct vki_itimerspec *, value);
+   PRE_MEM_WRITE("timer_gettime(value)",
+                 ARG2, sizeof(struct vki_itimerspec));
+}
+
+POST(sys_timer_gettime)
+{
+   POST_MEM_WRITE(ARG2, sizeof(struct vki_itimerspec));
+}
+
+PRE(sys_timer_getoverrun)
+{
+   /* int timer_getoverrun(timer_t timerid); */
+   PRINT("sys_timer_getoverrun ( %ld )", ARG1);
+   PRE_REG_READ1(long, "timer_getoverrun", vki_timer_t, timerid);
+}
+
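+/* Note: facl(2) is typically used in two steps: a caller first issues
+   GETACLCNT (or ACE_GETACLCNT) to learn how many entries exist and only then
+   GETACL/ACE_GETACL with an appropriately sized buffer.  This is why the
+   count-only commands below need no buffer checks. */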
+PRE(sys_facl)
+{
+   /* int facl(int fildes, int cmd, int nentries, void *aclbufp); */
+   PRINT("sys_facl ( %ld, %ld, %ld, %#lx )", ARG1, ARG2, ARG3, ARG4);
+
+   PRE_REG_READ4(long, "facl", int, fildes, int, cmd,
+                 int, nentries, void *, aclbufp);
+
+   switch (ARG2 /*cmd*/) {
+   case VKI_SETACL:
+      if (ARG4)
+         PRE_MEM_READ("facl(aclbufp)", ARG4, sizeof(vki_aclent_t));
+      break;
+   case VKI_GETACL:
+      PRE_MEM_WRITE("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_aclent_t));
+      break;
+   case VKI_GETACLCNT:
+      break;
+   case VKI_ACE_SETACL:
+      if (ARG4)
+         PRE_MEM_READ("facl(aclbufp)", ARG4, sizeof(vki_ace_t));
+      break;
+   case VKI_ACE_GETACL:
+      PRE_MEM_WRITE("facl(aclbufp)", ARG4, ARG3 * sizeof(vki_ace_t));
+      break;
+   case VKI_ACE_GETACLCNT:
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the facl call with cmd %ld.", ARG2);
+      /*NOTREACHED*/
+      break;
+   }
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "facl", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_facl)
+{
+   switch (ARG2 /*cmd*/) {
+   case VKI_SETACL:
+      break;
+   case VKI_GETACL:
+      POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_aclent_t));
+      break;
+   case VKI_GETACLCNT:
+      break;
+   case VKI_ACE_SETACL:
+      break;
+   case VKI_ACE_GETACL:
+      POST_MEM_WRITE(ARG4, ARG3 * sizeof(vki_ace_t));
+      break;
+   case VKI_ACE_GETACLCNT:
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
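+/* Door descriptor passing: a vki_door_desc_t with DOOR_DESCRIPTOR set carries
+   a file descriptor across the door; if DOOR_RELEASE is also set, the kernel
+   additionally closes that fd in the sending process as part of the call.
+   The helpers below mirror this: the PRE-side helper validates such fds and
+   records their closure, the POST-side helper records any fds received. */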
+static Int pre_check_and_close_fds(ThreadId tid, const HChar *name,
+                                   vki_door_desc_t *desc_ptr,
+                                   vki_uint_t desc_num)
+{
+   vki_uint_t i;
+
+   /* Verify passed file descriptors. */
+   for (i = 0; i < desc_num; i++) {
+      vki_door_desc_t *desc = &desc_ptr[i];
+      if ((desc->d_attributes & DOOR_DESCRIPTOR) &&
+          (desc->d_attributes & DOOR_RELEASE)) {
+         Int fd = desc->d_data.d_desc.d_descriptor;
+
+         /* Detect and negate attempts by the client to close Valgrind's fds.
+            Also if doing -d style logging (which is to fd = 2 = stderr),
+            don't allow that to be closed either. */
+         if (!ML_(fd_allowed)(fd, name, tid, False) ||
+             (fd == 2 && VG_(debugLog_getLevel)() > 0))
+            return VKI_EBADF;
+      }
+   }
+
+   /* All fds are allowed; record information about the closed ones.
+
+      Note: Recording information about closed fds would normally happen in
+      a post wrapper, but that is not possible here because door calls are
+      "very blocking"; if the information were recorded only after the syscall
+      finished, it would be out of date for the whole time the syscall is
+      blocked in the kernel.  Therefore, we record closed fds for this
+      specific syscall in the PRE wrapper.  Unfortunately, this creates a
+      problem when the syscall fails: for example, door_call() can fail with
+      EBADF or EFAULT, in which case no fds are released and the recorded
+      information about open fds becomes incorrect.  This should be very rare
+      (I hope) and such a condition is also reported in the post wrapper. */
+   if (VG_(clo_track_fds)) {
+      for (i = 0; i < desc_num; i++) {
+         vki_door_desc_t *desc = &desc_ptr[i];
+         if ((desc->d_attributes & DOOR_DESCRIPTOR) &&
+             (desc->d_attributes & DOOR_RELEASE)) {
+            Int fd = desc->d_data.d_desc.d_descriptor;
+            ML_(record_fd_close)(fd);
+         }
+      }
+   }
+
+   return 0;
+}
+
+static void post_record_fds(ThreadId tid, const HChar *name,
+                            vki_door_desc_t *desc_ptr, vki_uint_t desc_num)
+{
+   vki_uint_t i;
+
+   /* Record returned file descriptors. */
+   for (i = 0; i < desc_num; i++) {
+      vki_door_desc_t *desc = &desc_ptr[i];
+      if (desc->d_attributes & DOOR_DESCRIPTOR) {
+         Int fd = desc->d_data.d_desc.d_descriptor;
+         if (!ML_(fd_allowed)(fd, name, tid, True)) {
+            /* Unfortunately, we cannot recover at this point and have to fail
+               hard. */
+            VG_(message)(Vg_UserMsg, "The %s syscall returned an unallowed"
+                                     "file descriptor %d.\n", name, fd);
+            VG_(exit)(101);
+         }
+         else if (VG_(clo_track_fds))
+            ML_(record_fd_open_named)(tid, fd);
+      }
+   }
+}
+
+/* Handles repository door protocol request over client door fd. */
+static void repository_door_pre_mem_door_call_hook(ThreadId tid, Int fd,
+                                                   void *data_ptr,
+                                                   SizeT data_size)
+{
+   vki_rep_protocol_request_t *p = (vki_rep_protocol_request_t *) data_ptr;
+   PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                  "request->rpr_request)", p->rpr_request);
+
+   if (ML_(safe_to_deref)(p, sizeof(vki_rep_protocol_request_t))) {
+      switch (p->rpr_request) {
+      case VKI_REP_PROTOCOL_CLOSE:
+         break;
+      case VKI_REP_PROTOCOL_ENTITY_SETUP:
+         {
+            struct vki_rep_protocol_entity_setup *r =
+               (struct vki_rep_protocol_entity_setup *) p;
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "entity_setup->rpr_entityid)", r->rpr_entityid);
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "entity_setup->rpr_entitytype)", r->rpr_entitytype);
+         }
+         break;
+      case VKI_REP_PROTOCOL_ENTITY_NAME:
+         {
+            struct vki_rep_protocol_entity_name *r =
+               (struct vki_rep_protocol_entity_name *) p;
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "entity_name->rpr_entityid)", r->rpr_entityid);
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "entity_name->rpr_answertype)", r->rpr_answertype);
+         }
+         break;
+      case VKI_REP_PROTOCOL_ENTITY_GET:
+         {
+            struct vki_rep_protocol_entity_get *r =
+               (struct vki_rep_protocol_entity_get *) p;
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "entity_get->rpr_entityid)", r->rpr_entityid);
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "entity_get->rpr_object)", r->rpr_object);
+         }
+         break;
+      case VKI_REP_PROTOCOL_ENTITY_GET_CHILD:
+         {
+            struct vki_rep_protocol_entity_get_child *r =
+               (struct vki_rep_protocol_entity_get_child *) p;
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "entity_get_child->rpr_entityid)", r->rpr_entityid);
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "entity_get_child->rpr_childid)", r->rpr_childid);
+            PRE_MEM_RASCIIZ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                            "entity_get_child->rpr_name)", (Addr) r->rpr_name);
+         }
+         break;
+      case VKI_REP_PROTOCOL_ENTITY_GET_PARENT:
+         {
+            struct vki_rep_protocol_entity_parent *r =
+               (struct vki_rep_protocol_entity_parent *) p;
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "entity_get_parent->rpr_entityid)", r->rpr_entityid);
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "entity_get_parent->rpr_outid)", r->rpr_outid);
+         }
+         break;
+      case VKI_REP_PROTOCOL_ENTITY_RESET:
+         {
+            struct vki_rep_protocol_entity_reset *r =
+               (struct vki_rep_protocol_entity_reset *) p;
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "entity_reset->rpr_entityid)", r->rpr_entityid);
+         }
+         break;
+      case VKI_REP_PROTOCOL_ENTITY_TEARDOWN:
+         {
+            struct vki_rep_protocol_entity_teardown *r =
+               (struct vki_rep_protocol_entity_teardown *) p;
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "entity_teardown->rpr_entityid)", r->rpr_entityid);
+         }
+         break;
+      case VKI_REP_PROTOCOL_ITER_READ:
+         {
+            struct vki_rep_protocol_iter_read *r =
+               (struct vki_rep_protocol_iter_read *) p;
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "iter_read->rpr_iterid)", r->rpr_iterid);
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "iter_read->rpr_sequence)", r->rpr_sequence);
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "iter_read->rpr_entityid)", r->rpr_entityid);
+         }
+         break;
+      case VKI_REP_PROTOCOL_ITER_READ_VALUE:
+         {
+            struct vki_rep_protocol_iter_read_value *r =
+               (struct vki_rep_protocol_iter_read_value *) p;
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "iter_read_value->rpr_iterid)", r->rpr_iterid);
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "iter_read_value->rpr_sequence)", r->rpr_sequence);
+         }
+         break;
+      case VKI_REP_PROTOCOL_ITER_RESET:
+      case VKI_REP_PROTOCOL_ITER_SETUP:
+      case VKI_REP_PROTOCOL_ITER_TEARDOWN:
+         {
+            struct vki_rep_protocol_iter_request *r =
+               (struct vki_rep_protocol_iter_request *) p;
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "iter_request->rpr_iterid)", r->rpr_iterid);
+         }
+         break;
+      case VKI_REP_PROTOCOL_ITER_START:
+         {
+            struct vki_rep_protocol_iter_start *r =
+               (struct vki_rep_protocol_iter_start *) p;
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "iter_start->rpr_iterid)", r->rpr_iterid);
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "iter_start->rpr_entity)", r->rpr_entity);
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "iter_start->rpr_itertype)", r->rpr_itertype);
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "iter_start->rpr_flags)", r->rpr_flags);
+            PRE_MEM_RASCIIZ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                            "iter_start->rpr_pattern)", (Addr) r->rpr_pattern);
+         }
+         break;
+      case VKI_REP_PROTOCOL_PROPERTY_GET_TYPE:
+      case VKI_REP_PROTOCOL_PROPERTY_GET_VALUE:
+         {
+            struct vki_rep_protocol_property_request *r =
+               (struct vki_rep_protocol_property_request *) p;
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "property_request->rpr_entityid)", r->rpr_entityid);
+         }
+         break;
+      default:
+         VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME
+                            " where rpr_request=%u.", p->rpr_request);
+         /* NOTREACHED */
+         break;
+      }
+   }
+}
+
+/* Handles repository door protocol response over client door fd. */
+static void repository_door_post_mem_door_call_hook(ThreadId tid, Int fd,
+                                                    void *rbuf, SizeT rsize)
+{
+   /* :TODO: Ideally we would match the response type with the previous
+      request, because the response itself does not contain any type
+      identification.
+      For now, simply mark the whole response buffer as defined. */
+   POST_MEM_WRITE((Addr) rbuf, rsize);
+}
+
+/* Pre-syscall checks for params->data_ptr contents of a door_call(). */
+static void door_call_pre_mem_params_data(ThreadId tid, Int fd,
+                                          void *data_ptr, SizeT data_size)
+{
+   const HChar *pathname;
+
+   /* Get the pathname of the door file descriptor, if it has not been
+      recorded already.  It is needed below to dissect the door service by
+      its pathname. */
+   if (!VG_(clo_track_fds) && !ML_(fd_recorded)(fd)) {
+      ML_(record_fd_open_named)(tid, fd);
+   }
+   pathname = ML_(find_fd_recorded_by_fd)(fd);
+
+   /* Debug-only printing. */
+   if (0) {
+      VG_(printf)("PRE(door_call) with fd=%d and filename=%s\n",
+                  fd, pathname);
+   }
+
+   if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
+      vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
+
+      PRE_FIELD_READ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
+                     "kcf_door_arg_t->da_version)", p->da_version);
+      PRE_FIELD_READ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
+                     "kcf_door_arg_t->da_iskernel)", p->da_iskernel);
+      PRE_MEM_RASCIIZ("door_call(\"" VKI__PATH_KCFD_DOOR "\", "
+                      "kcf_door_arg_t->da_u.filename)",
+                      (Addr) p->vki_da_u.filename);
+   } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
+      vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
+
+      PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
+                     "nss_pheader->nsc_callnumber)", p->nsc_callnumber);
+      if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
+         if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
+            /* request from an application towards nscd */
+            PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->p_version)", p->p_version);
+            PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->dbd_off)", p->dbd_off);
+            PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->dbd_len)", p->dbd_len);
+            PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->key_off)", p->key_off);
+            PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->key_len)", p->key_len);
+            PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->data_off)", p->data_off);
+            PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->data_len)", p->data_len);
+            /* Fields ext_off and ext_len are set only sporadically. */
+            PRE_FIELD_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->pbufsiz)", p->pbufsiz);
+            PRE_MEM_WRITE("door_call(\"" VKI_NAME_SERVICE_DOOR "\", pbuf)",
+                          (Addr) p, p->pbufsiz);
+
+            if (p->dbd_len > 0) {
+               vki_nss_dbd_t *dbd
+                  = (vki_nss_dbd_t *) ((HChar *) p + p->dbd_off);
+
+               PRE_MEM_READ("door_call(\"" VKI_NAME_SERVICE_DOOR
+                            "\", nss_dbd)", (Addr) dbd, sizeof(vki_nss_dbd_t));
+               if (ML_(safe_to_deref)(dbd, sizeof(vki_nss_dbd_t))) {
+                  if (dbd->o_name != 0)
+                     PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
+                                     "\", nss_dbd->o_name)", (Addr) ((HChar *) p
+                                     + p->dbd_off + dbd->o_name));
+                  if (dbd->o_config_name != 0)
+                     PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
+                                     "\", nss_dbd->o_config_name)",
+                                     (Addr) ((HChar *) p + p->dbd_off
+                                     + dbd->o_config_name));
+                  if (dbd->o_default_config != 0)
+                     PRE_MEM_RASCIIZ("door_call(\"" VKI_NAME_SERVICE_DOOR
+                                     "\", nss_dbd->o_default_config)",
+                                     (Addr) ((HChar *) p + p->dbd_off +
+                                     dbd->o_default_config));
+              }
+           }
+
+           PRE_MEM_READ("door_call(\"" VKI_NAME_SERVICE_DOOR "\", nss->key)",
+                        (Addr) ((HChar *) p + p->key_off), p->key_len);
+         } else {
+            /* request from a child nscd towards parent nscd */
+            VG_(unimplemented)("Door wrapper of child/parent nscd.");
+         }
+      }
+   } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
+      vki_repository_door_request_t *p =
+         (vki_repository_door_request_t *) data_ptr;
+
+      PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                     "request->rdr_version)", p->rdr_version);
+      PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                     "request->rdr_request)", p->rdr_request);
+      if (ML_(safe_to_deref)(p, sizeof(vki_repository_door_request_t))) {
+         if (p->rdr_version == VKI_REPOSITORY_DOOR_VERSION) {
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "request->rdr_flags)", p->rdr_flags);
+            PRE_FIELD_READ("door_call(\"" VKI_REPOSITORY_DOOR_NAME "\", "
+                           "request->rdr_debug)", p->rdr_debug);
+         } else {
+            VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME
+                               " where version=%u.", p->rdr_version);
+         }
+      }
+   } else {
+      const OpenDoor *open_door = door_find_by_fd(fd);
+      if ((open_door != NULL) && (open_door->pre_mem_hook != NULL)) {
+         open_door->pre_mem_hook(tid, fd, data_ptr, data_size);
+      } else {
+         if (SimHintiS(SimHint_lax_doors, VG_(clo_sim_hints))) {
+            /*
+             * Be very lax about door syscall handling over unrecognized
+             * door file descriptors.  In particular, do not require that
+             * the full buffer is initialized when writing.  Without this,
+             * programs using libdoor(3LIB) functionality with completely
+             * proprietary semantics may report a large number of false
+             * positives.
+             */
+         } else {
+            static Int moans = 3;
+
+            /* generic default */
+            if (moans > 0 && !VG_(clo_xml)) {
+               moans--;
+               VG_(umsg)(
+"Warning: noted and generically handled door call\n"
+"   on file descriptor %d (filename: %s).\n"
+"   This could cause spurious value errors to appear.\n"
+"   See README_MISSING_SYSCALL_OR_IOCTL for guidance on writing a proper wrapper.\n"
+"   Alternatively you may find '--sim-hints=lax-doors' option useful.\n",
+                         fd, pathname);
+            }
+            PRE_MEM_READ("door_call(params->data_ptr)",
+                         (Addr) data_ptr, data_size);
+         }
+      }
+   }
+}
+
+/* Post-syscall checks for params->rbuf contents of a door_call(). */
+static void door_call_post_mem_params_rbuf(ThreadId tid, Int fd,
+                                           void *rbuf, SizeT rsize,
+                                           const vki_door_desc_t *desc_ptr,
+                                           vki_uint_t desc_num)
+{
+   const HChar *pathname = ML_(find_fd_recorded_by_fd)(fd);
+
+   /* Debug-only printing. */
+   if (0) {
+      VG_(printf)("POST(door_call) with fd=%d and filename=%s\n",
+                  fd, pathname);
+   }
+
+   if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
+      vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) rbuf;
+
+      POST_FIELD_WRITE(p->da_version);
+      POST_FIELD_WRITE(p->vki_da_u.result.status);
+      POST_MEM_WRITE((Addr) p->vki_da_u.result.signature,
+                     p->vki_da_u.result.siglen);
+   } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
+      vki_nss_pheader_t *p = (vki_nss_pheader_t *) rbuf;
+
+      POST_FIELD_WRITE(p->nsc_callnumber);
+      if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
+         if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
+            /* response from nscd to an application */
+            POST_FIELD_WRITE(p->p_status);
+            POST_FIELD_WRITE(p->p_errno);
+            POST_FIELD_WRITE(p->p_herrno);
+            POST_FIELD_WRITE(p->dbd_off);
+            POST_FIELD_WRITE(p->dbd_len);
+            POST_FIELD_WRITE(p->key_off);
+            POST_FIELD_WRITE(p->key_len);
+            POST_FIELD_WRITE(p->data_off);
+            POST_FIELD_WRITE(p->data_len);
+            POST_FIELD_WRITE(p->ext_off);
+            POST_FIELD_WRITE(p->ext_len);
+            POST_FIELD_WRITE(p->pbufsiz);
+
+            if (p->pbufsiz <= rsize) {
+               if (p->dbd_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
+                  SizeT len = MIN(p->dbd_len, p->pbufsiz - p->dbd_off);
+                  POST_MEM_WRITE((Addr) ((HChar *) p + p->dbd_off), len);
+               }
+
+               if (p->key_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
+                  SizeT len = MIN(p->key_len, p->pbufsiz - p->key_off);
+                  POST_MEM_WRITE((Addr) ((HChar *) p + p->key_off), len);
+               }
+
+               if (p->data_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
+                  SizeT len = MIN(p->data_len, p->pbufsiz - p->data_off);
+                  POST_MEM_WRITE((Addr) ((HChar *) p + p->data_off), len);
+               }
+
+               if (p->ext_off < p->pbufsiz - sizeof(vki_nss_pheader_t)) {
+                  SizeT len = MIN(p->ext_len, p->pbufsiz - p->ext_off);
+                  POST_MEM_WRITE((Addr) ((HChar *) p + p->ext_off), len);
+               }
+            }
+         } else {
+            /* response from parent nscd to a child nscd */
+            VG_(unimplemented)("Door wrapper of child/parent nscd.");
+         }
+      }
+   } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
+      POST_FIELD_WRITE(((vki_repository_door_response_t *) rbuf)->rdr_status);
+      /* A new client door fd is passed over the global repository door. */
+      if ((desc_ptr != NULL) && (desc_num > 0)) {
+         if (desc_ptr[0].d_attributes & DOOR_DESCRIPTOR) {
+            door_record_client(tid, desc_ptr[0].d_data.d_desc.d_descriptor,
+                               repository_door_pre_mem_door_call_hook,
+                               repository_door_post_mem_door_call_hook);
+         }
+      }
+   } else {
+      const OpenDoor *open_door = door_find_by_fd(fd);
+      if ((open_door != NULL) && (open_door->post_mem_hook != NULL)) {
+         open_door->post_mem_hook(tid, fd, rbuf, rsize);
+      } else {
+         /* generic default */
+         POST_MEM_WRITE((Addr) rbuf, rsize);
+      }
+   }
+}
+
+/* Pre-syscall checks for data_ptr contents in a door_return(). */
+static void door_return_pre_mem_data(ThreadId tid, Addr server_procedure,
+                                     void *data_ptr, SizeT data_size)
+{
+   if ((data_size == 0) || (server_procedure == 0)) {
+      /* There is nothing to check.  This usually happens during the thread's
+         first call to door_return(). */
+      return;
+   }
+
+   /* Get the pathname of the door file descriptor based on the door server
+      procedure (that is all we have).  It is needed below to dissect the
+      door service by its pathname. */
+   const OpenDoor *open_door = door_find_by_proc(server_procedure);
+   const HChar *pathname = (open_door != NULL) ? open_door->pathname : NULL;
+   Int fd = (open_door != NULL) ? open_door->fd : -1;
+
+   /* Debug-only printing. */
+   if (0) {
+      VG_(printf)("PRE(door_return) with fd=%d and filename=%s "
+                  "(nr_doors_recorded=%u)\n",
+                  fd, pathname, nr_doors_recorded);
+   }
+
+   if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
+      vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
+
+      PRE_FIELD_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
+                     "kcf_door_arg_t->da_version)", p->da_version);
+      PRE_FIELD_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
+                     "kcf_door_arg_t->da_u.result.status)",
+                     p->vki_da_u.result.status);
+      PRE_MEM_READ("door_return(\"" VKI__PATH_KCFD_DOOR "\", "
+                   "kcf_door_arg_t->da_u.result.signature)",
+                   (Addr) p->vki_da_u.result.signature,
+                   p->vki_da_u.result.siglen);
+   } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
+      vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
+
+      PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
+                     "nss_pheader->nsc_callnumber)", p->nsc_callnumber);
+      if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
+         if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
+            /* response from nscd to an application */
+            PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->p_status)", p->p_status);
+            PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->p_errno)", p->p_errno);
+            PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->p_herrno)", p->p_herrno);
+            PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->dbd_off)", p->dbd_off);
+            PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->dbd_len)", p->dbd_len);
+            PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->data_off)", p->data_off);
+            PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->data_len)", p->data_len);
+            PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->ext_off)", p->ext_off);
+            PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->ext_len)", p->ext_len);
+            PRE_FIELD_READ("door_return(\"" VKI_NAME_SERVICE_DOOR "\", "
+                           "nss_pheader->pbufsiz)", p->pbufsiz);
+            PRE_MEM_WRITE("door_return(\"" VKI_NAME_SERVICE_DOOR "\", pbuf)",
+                          (Addr) p, p->pbufsiz);
+            PRE_MEM_READ("door_return(\"" VKI_NAME_SERVICE_DOOR
+                         "\", nss->data)",
+                         (Addr) ((HChar *) p + p->data_off), p->data_len);
+            PRE_MEM_READ("door_return(\"" VKI_NAME_SERVICE_DOOR
+                         "\", nss->ext)",
+                         (Addr) ((HChar *) p + p->ext_off), p->ext_len);
+         } else {
+            /* response from parent nscd to a child nscd */
+            VG_(unimplemented)("Door wrapper of child/parent nscd.");
+         }
+      }
+   } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
+            VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME);
+   } else {
+      if (SimHintiS(SimHint_lax_doors, VG_(clo_sim_hints))) {
+         /*
+          * Be very lax about door syscall handling over unrecognized
+          * door file descriptors.  In particular, do not require that
+          * the full buffer is initialized when writing.  Without this,
+          * programs using libdoor(3LIB) functionality with completely
+          * proprietary semantics may report a large number of false
+          * positives.
+          */
+      } else {
+         static Int moans = 3;
+
+         /* generic default */
+         if (moans > 0 && !VG_(clo_xml)) {
+            moans--;
+            VG_(umsg)(
+"Warning: noted and generically handled door return\n"
+"   on file descriptor %d (filename: %s).\n"
+"   This could cause spurious value errors to appear.\n"
+"   See README_MISSING_SYSCALL_OR_IOCTL for guidance on writing a proper wrapper.\n"
+"   Alternatively you may find '--sim-hints=lax-doors' option useful.\n",
+                   fd, pathname);
+         }
+         PRE_MEM_READ("door_return(data_ptr)",
+                      (Addr) data_ptr, data_size);
+      }
+   }
+}
+
+/* Post-syscall checks for data_ptr contents in a door_return(). */
+static void door_return_post_mem_data(ThreadId tid, Addr server_procedure,
+                                      void *data_ptr, SizeT data_size)
+{
+   const OpenDoor *open_door = door_find_by_proc(server_procedure);
+   const HChar *pathname = (open_door != NULL) ? open_door->pathname : NULL;
+
+   /* Debug-only printing. */
+   if (0) {
+      Int fd = (open_door != NULL) ? open_door->fd : -1;
+      VG_(printf)("POST(door_return) with fd=%d and filename=%s "
+                  "(nr_doors_recorded=%u)\n",
+                  fd, pathname, nr_doors_recorded);
+   }
+
+   if (VG_STREQ(pathname, VKI__PATH_KCFD_DOOR)) {
+      vki_kcf_door_arg_t *p = (vki_kcf_door_arg_t *) data_ptr;
+
+      POST_FIELD_WRITE(p->da_version);
+      POST_FIELD_WRITE(p->da_iskernel);
+      POST_MEM_WRITE((Addr) p->vki_da_u.filename,
+                     VG_(strlen)(p->vki_da_u.filename) + 1);
+   } else if (VG_STREQ(pathname, VKI_NAME_SERVICE_DOOR)) {
+      vki_nss_pheader_t *p = (vki_nss_pheader_t *) data_ptr;
+
+      POST_FIELD_WRITE(p->nsc_callnumber);
+      if (ML_(safe_to_deref)(p, sizeof(vki_nss_pheader_t))) {
+         if ((p->nsc_callnumber & VKI_NSCDV2CATMASK) == VKI_NSCD_CALLCAT_APP) {
+            /* request from an application towards nscd */
+            POST_FIELD_WRITE(p->p_version);
+            POST_FIELD_WRITE(p->dbd_off);
+            POST_FIELD_WRITE(p->dbd_len);
+            POST_FIELD_WRITE(p->key_off);
+            POST_FIELD_WRITE(p->key_len);
+            POST_FIELD_WRITE(p->data_off);
+            POST_FIELD_WRITE(p->data_len);
+            POST_FIELD_WRITE(p->ext_off);
+            POST_FIELD_WRITE(p->ext_len);
+            POST_FIELD_WRITE(p->pbufsiz);
+
+            if (p->dbd_len > 0) {
+               vki_nss_dbd_t *dbd
+                  = (vki_nss_dbd_t *) ((HChar *) p + p->dbd_off);
+
+               POST_MEM_WRITE((Addr) dbd, sizeof(vki_nss_dbd_t));
+               if (ML_(safe_to_deref)(dbd, sizeof(vki_nss_dbd_t))) {
+                  SizeT headers_size = sizeof(vki_nss_pheader_t)
+                     + sizeof(vki_nss_dbd_t);
+
+                  if (dbd->o_name != 0) {
+                     HChar *name = (HChar *) p + p->dbd_off + dbd->o_name;
+                     SizeT name_len = VG_(strlen)(name) + 1;
+                     if (name_len <= data_size - headers_size)
+                        POST_MEM_WRITE((Addr) name, name_len);
+                  }
+                  if (dbd->o_config_name != 0) {
+                     HChar *name = (HChar *) p + p->dbd_off + dbd->o_config_name;
+                     SizeT name_len = VG_(strlen)(name) + 1;
+                     if (name_len <= data_size - headers_size)
+                        POST_MEM_WRITE((Addr) name, name_len);
+                  }
+                  if (dbd->o_default_config != 0) {
+                     HChar *name = (HChar *) p + p->dbd_off
+                        + dbd->o_default_config;
+                     SizeT name_len = VG_(strlen)(name) + 1;
+                     if (name_len <= data_size - headers_size)
+                        POST_MEM_WRITE((Addr) name, name_len);
+                  }
+              }
+           }
+
+           if (p->key_len <= data_size - p->key_off)
+              POST_MEM_WRITE((Addr) ((HChar *) p + p->key_off), p->key_len);
+         } else {
+            /* request from a child nscd towards parent nscd */
+            VG_(unimplemented)("Door wrapper of child/parent nscd.");
+         }
+      }
+   } else if (VG_STREQ(pathname, VKI_REPOSITORY_DOOR_NAME)) {
+            VG_(unimplemented)("Door wrapper of " VKI_REPOSITORY_DOOR_NAME);
+   } else {
+      /* generic default */
+      POST_MEM_WRITE((Addr) data_ptr, data_size);
+   }
+}
+
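+/* The door() syscall multiplexes all libdoor(3LIB) operations; roughly,
+   DOOR_CREATE backs door_create(3C), DOOR_CALL backs door_call(3C),
+   DOOR_RETURN backs door_return(3C) and DOOR_INFO backs door_info(3C).
+   Subcodes not handled yet (DOOR_BIND, DOOR_UCRED, ...) are flagged via
+   VG_(unimplemented). */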
+PRE(sys_door)
+{
+   /* int doorfs(long arg1, long arg2, long arg3, long arg4, long arg5,
+                 long subcode); */
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   *flags |= SfMayBlock | SfPostOnFail;
+
+   PRINT("sys_door ( %#lx, %#lx, %#lx, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3,
+         ARG4, ARG5, ARG6);
+
+   /* The PRE_REG_READ6 macro cannot simply be used because not every ARG is
+      used by all door() syscall variants.  Note that ARG6 (subcode) is always
+      used. */
+#define PRE_REG_READ_SIXTH_ONLY         \
+   if (VG_(tdict).track_pre_reg_read) { \
+      PRA6("door", long, subcode);      \
+   }
+
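+   /* For example, the DOOR_INFO case below combines PRE_REG_READ2() with
+      PRE_REG_READ_SIXTH_ONLY so that arg1, arg2 and the subcode are marked
+      as read while arg3..arg5 are left untouched. */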
+   switch (ARG6 /*subcode*/) {
+   case VKI_DOOR_CREATE:
+      PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
+      PRE_REG_READ_SIXTH_ONLY;
+      /* Note: the first argument to DOOR_CREATE is a server procedure.
+         This could lead to a problem if the kernel tried to force the
+         execution of this procedure, similar to how signal handlers are
+         executed.  Fortunately, the kernel never does that (for user-space
+         server procedures); the procedure is always invoked by the standard
+         library. */
+      break;
+   case VKI_DOOR_REVOKE:
+      PRE_REG_READ1(long, "door", long, arg1);
+      PRE_REG_READ_SIXTH_ONLY;
+      if (!ML_(fd_allowed)(ARG1, "door_revoke", tid, False))
+         SET_STATUS_Failure(VKI_EBADF);
+      break;
+   case VKI_DOOR_INFO:
+      PRE_REG_READ2(long, "door", long, arg1, long, arg2);
+      PRE_REG_READ_SIXTH_ONLY;
+      PRE_MEM_WRITE("door_info(info)", ARG2, sizeof(vki_door_info_t));
+      break;
+   case VKI_DOOR_CALL:
+      {
+         PRE_REG_READ2(long, "door", long, arg1, long, arg2);
+         PRE_REG_READ_SIXTH_ONLY;
+
+         Int rval = 0;
+         vki_door_arg_t *params = (vki_door_arg_t*)ARG2;
+
+         if (!ML_(fd_allowed)(ARG1, "door_call", tid, False))
+            rval = VKI_EBADF;
+
+         PRE_FIELD_READ("door_call(params->data_ptr)", params->data_ptr);
+         PRE_FIELD_READ("door_call(params->data_size)", params->data_size);
+         PRE_FIELD_READ("door_call(params->desc_ptr)", params->desc_ptr);
+         PRE_FIELD_READ("door_call(params->desc_num)", params->desc_num);
+         PRE_FIELD_READ("door_call(params->rbuf)", params->rbuf);
+         PRE_FIELD_READ("door_call(params->rsize)", params->rsize);
+
+         if (ML_(safe_to_deref)(params, sizeof(*params))) {
+            if (params->data_ptr)
+               door_call_pre_mem_params_data(tid, ARG1, params->data_ptr,
+                                             params->data_size);
+
+            if (params->desc_ptr) {
+               SizeT desc_size = params->desc_num * sizeof(*params->desc_ptr);
+               PRE_MEM_READ("door_call(params->desc_ptr)",
+                            (Addr)params->desc_ptr, desc_size);
+
+               /* Do not record information about closed fds if we are going
+                  to fail the syscall and so no fds will be closed. */
+               if ((rval == 0) &&
+                   (ML_(safe_to_deref)(params->desc_ptr, desc_size))) {
+                     rval = pre_check_and_close_fds(tid, "door_call",
+                                                    params->desc_ptr,
+                                                    params->desc_num);
+               }
+            }
+
+            if (params->rbuf)
+               PRE_MEM_WRITE("door_call(params->rbuf)", (Addr)params->rbuf,
+                             params->rsize);
+         }
+
+         if (rval)
+            SET_STATUS_Failure(rval);
+      }
+      break;
+   case VKI_DOOR_BIND:
+      PRE_REG_READ1(long, "door", long, arg1);
+      PRE_REG_READ_SIXTH_ONLY;
+      VG_(unimplemented)("DOOR_BIND");
+      break;
+   case VKI_DOOR_UNBIND:
+      PRE_REG_READ0(long, "door");
+      PRE_REG_READ_SIXTH_ONLY;
+      VG_(unimplemented)("DOOR_UNBIND");
+      break;
+   case VKI_DOOR_UNREFSYS:
+      PRE_REG_READ0(long, "door");
+      PRE_REG_READ_SIXTH_ONLY;
+      VG_(unimplemented)("DOOR_UNREFSYS");
+      break;
+   case VKI_DOOR_UCRED:
+      PRE_REG_READ1(long, "door", long, arg1);
+      PRE_REG_READ_SIXTH_ONLY;
+      VG_(unimplemented)("DOOR_UCRED");
+      break;
+   case VKI_DOOR_RETURN:
+      PRE_REG_READ6(long, "door", long, arg1, long, arg2, long, arg3,
+                    long, arg4, long, arg5, long, subcode);
+
+      /* Register %esp/%rsp is read and modified by the syscall. */
+      VG_TRACK(pre_reg_read, Vg_CoreSysCall, tid, "door_return(sp)",
+               VG_O_STACK_PTR, sizeof(UWord));
+      /* Register %ebp/%rbp is not really read by the syscall, it is only
+         written by it, but it is hard to determine when it is written so we
+         make sure it is always valid prior to making the syscall. */
+      VG_TRACK(pre_reg_read, Vg_CoreSysCall, tid, "door_return(bp)",
+               VG_O_FRAME_PTR, sizeof(UWord));
+
+      door_return_pre_mem_data(tid, tst->os_state.door_return_procedure,
+                               (void *) ARG1, ARG2);
+
+      /* Do not tell the tool where the syscall is going to write the
+         resulting data.  It is necessary to skip this check because the data
+         area starting at ARG4-ARG5 (of length ARG5) is usually on a client
+         thread stack below the stack pointer and therefore it can be marked
+         by a tool (for example, Memcheck) as inaccessible.  It is ok to skip
+         this check in this case because if there is something wrong with the
+         data area then the syscall will fail or the error will be handled by
+         POST_MEM_WRITE() in the post wrapper. */
+      /*PRE_MEM_WRITE("door_return(sp)", ARG4 - ARG5, ARG5);*/
+
+      if (ARG3) {
+         vki_door_return_desc_t *desc_env = (vki_door_return_desc_t*)ARG3;
+
+         PRE_MEM_READ("door_return(desc_env)", ARG3,
+                      sizeof(vki_door_return_desc_t));
+
+         if (ML_(safe_to_deref)(desc_env, sizeof(*desc_env)) &&
+             desc_env->desc_ptr) {
+            Int rval;
+
+            PRE_MEM_READ("door_return(desc_env->desc_ptr)",
+                         (Addr)desc_env->desc_ptr,
+                         desc_env->desc_num * sizeof(*desc_env->desc_ptr));
+
+            rval = pre_check_and_close_fds(tid, "door_return",
+                                           desc_env->desc_ptr,
+                                           desc_env->desc_num);
+            if (rval)
+               SET_STATUS_Failure(rval);
+         }
+      }
+      tst->os_state.in_door_return = True;
+      tst->os_state.door_return_procedure = 0;
+      break;
+   case VKI_DOOR_GETPARAM:
+      PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
+      PRE_REG_READ_SIXTH_ONLY;
+      VG_(unimplemented)("DOOR_GETPARAM");
+      break;
+   case VKI_DOOR_SETPARAM:
+      PRE_REG_READ3(long, "door", long, arg1, long, arg2, long, arg3);
+      PRE_REG_READ_SIXTH_ONLY;
+      VG_(unimplemented)("DOOR_SETPARAM");
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the door call with subcode %ld.", ARG6);
+      /*NOTREACHED*/
+      break;
+   }
+
+#undef PRE_REG_READ_SIXTH_ONLY
+}
+
+POST(sys_door)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+
+   vg_assert(SUCCESS || FAILURE);
+
+   /* Alter the tst->os_state.in_door_return flag. */
+   if (ARG6 == VKI_DOOR_RETURN) {
+      vg_assert(tst->os_state.in_door_return == True);
+      tst->os_state.in_door_return = False;
+
+      /* Inform the tool that %esp/%rsp and %ebp/%rbp were (potentially)
+         modified. */
+      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, VG_O_STACK_PTR,
+               sizeof(UWord));
+      VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, VG_O_FRAME_PTR,
+               sizeof(UWord));
+   }
+   else
+      vg_assert(tst->os_state.in_door_return == False);
+
+   if (FAILURE) {
+      if (VG_(clo_track_fds)) {
+         /* See the discussion in pre_check_and_close_fds() to understand this
+            part. */
+         Bool loss = False;
+         switch (ARG6 /*subcode*/) {
+         case VKI_DOOR_CALL:
+            if (ERR == VKI_EFAULT || ERR == VKI_EBADF)
+               loss = True;
+            break;
+         case VKI_DOOR_RETURN:
+            if (ERR == VKI_EFAULT || ERR == VKI_EINVAL)
+               loss = True;
+            break;
+         default:
+            break;
+         }
+         if (loss)
+            VG_(message)(Vg_UserMsg, "The door call failed with an "
+                                     "unexpected error and information "
+                                     "about open file descriptors may now "
+                                     "be imprecise.\n");
+      }
+
+      return;
+   }
+
+   vg_assert(SUCCESS);
+
+   switch (ARG6 /*subcode*/) {
+   case VKI_DOOR_CREATE:
+      door_record_server(tid, ARG1, RES);
+      break;
+   case VKI_DOOR_REVOKE:
+      door_revoke(tid, ARG1);
+      if (VG_(clo_track_fds))
+         ML_(record_fd_close)(ARG1);
+      break;
+   case VKI_DOOR_INFO:
+      POST_MEM_WRITE(ARG2, sizeof(vki_door_info_t));
+      break;
+   case VKI_DOOR_CALL:
+      {
+         /* Note that all returned values are stored in the rbuf, i.e.
+            data_ptr and desc_ptr point into this buffer. */
+         vki_door_arg_t *params = (vki_door_arg_t*)ARG2;
+
+         if (params->rbuf) {
+            Addr addr = (Addr)params->rbuf;
+            if (!VG_(am_find_anon_segment(addr))) {
+               /* This segment is new and was mapped by the kernel. */
+               UInt prot, flags;
+               SizeT size;
+
+               prot = VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC;
+               flags = VKI_MAP_ANONYMOUS;
+               size = VG_PGROUNDUP(params->rsize);
+
+               VG_(debugLog)(1, "syswrap-solaris", "POST(sys_door), "
+                                "new segment: vaddr=%#lx, size=%#lx, "
+                                "prot=%#x, flags=%#x, fd=%ld, offset=%#llx\n",
+                                addr, size, prot, flags, (UWord)-1, (ULong)0);
+
+               ML_(notify_core_and_tool_of_mmap)(addr, size, prot, flags,
+                                                 -1, 0);
+
+               /* Note: We don't notify the debuginfo reader about this
+                  mapping because there is no debug information stored in
+                  this segment. */
+            }
+
+            door_call_post_mem_params_rbuf(tid, ARG1, (void *) addr,
+                                           params->rsize, params->desc_ptr,
+                                           params->desc_num);
+         }
+
+         if (params->desc_ptr) {
+            POST_MEM_WRITE((Addr)params->desc_ptr,
+                           params->desc_num * sizeof(vki_door_desc_t));
+            post_record_fds(tid, "door_call", params->desc_ptr,
+                            params->desc_num);
+         }
+      }
+      break;
+   case VKI_DOOR_BIND:
+      break;
+   case VKI_DOOR_UNBIND:
+      break;
+   case VKI_DOOR_UNREFSYS:
+      break;
+   case VKI_DOOR_UCRED:
+      break;
+   case VKI_DOOR_RETURN:
+      {
+         struct vki_door_results *results
+            = (struct vki_door_results*)VG_(get_SP)(tid);
+
+         tst->os_state.door_return_procedure = (Addr)results->pc;
+
+         POST_MEM_WRITE((Addr)results, sizeof(*results));
+         if (results->data_ptr)
+            door_return_post_mem_data(tid,
+                                      tst->os_state.door_return_procedure,
+                                      results->data_ptr,
+                                      results->data_size);
+         if (results->desc_ptr) {
+            POST_MEM_WRITE((Addr)results->desc_ptr,
+                           results->desc_num * sizeof(vki_door_desc_t));
+            post_record_fds(tid, "door_return", results->desc_ptr,
+                            results->desc_num);
+         }
+
+         POST_MEM_WRITE((Addr)results->door_info,
+                        sizeof(*results->door_info));
+      }
+      break;
+   case VKI_DOOR_GETPARAM:
+      break;
+   case VKI_DOOR_SETPARAM:
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_schedctl)
+{
+   /* caddr_t schedctl(void); */
+   /* This syscall returns an address that points to struct sc_shared.
+      This per-thread structure is used as an interface between the libc and
+      the kernel. */
+   PRINT("sys_schedctl ( )");
+   PRE_REG_READ0(long, "schedctl");
+}
+
+POST(sys_schedctl)
+{
+   Addr a = RES;
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+
+   /* Stay sane. */
+   vg_assert((tst->os_state.schedctl_data == 0) ||
+             (tst->os_state.schedctl_data == a));
+   tst->os_state.schedctl_data = a;
+
+   /* Returned address points to a block in a mapped page. */
+   if (!VG_(am_find_anon_segment(a))) {
+      Addr page = VG_PGROUNDDN(a);
+      UInt prot = VKI_PROT_READ | VKI_PROT_WRITE | VKI_PROT_EXEC;
+      UInt flags = VKI_MAP_ANONYMOUS;
+      /* The kernel always allocates one page for the sc_shared struct. */
+      SizeT size = VKI_PAGE_SIZE;
+
+      VG_(debugLog)(1, "syswrap-solaris", "POST(sys_schedctl), new segment: "
+                    "vaddr=%#lx, size=%#lx, prot=%#x, flags=%#x, fd=-1, "
+                    "offset=0\n", page, size, prot, flags);
+
+      /* The kernel always places a redzone before and after the allocated
+         page.  Check this assertion now; the tool can later request to
+         allocate a Valgrind segment and aspacemgr will place it adjacent. */
+      const NSegment *seg = VG_(am_find_nsegment(page - 1));
+      vg_assert(seg == NULL || seg->kind == SkResvn);
+      seg = VG_(am_find_nsegment(page + VKI_PAGE_SIZE));
+      vg_assert(seg == NULL || seg->kind == SkResvn);
+
+      /* The address space manager works with whole pages. */
+      VG_(am_notify_client_mmap)(page, size, prot, flags, -1, 0);
+
+      /* Note: There is no need to notify debuginfo about the new mapping
+         because it is only an anonymous mapping. */
+      /* Note: schedctl data are cleaned in two places:
+         - for the tool when the thread exits
+         - for the core in child's post-fork handler clean_schedctl_data(). */
+   }
+
+   /* The tool needs per-thread granularity, not whole pages. */
+   VG_TRACK(new_mem_mmap, a, sizeof(struct vki_sc_shared), True, True, True, 0);
+   POST_MEM_WRITE(a, sizeof(struct vki_sc_shared));
+}
+
+PRE(sys_resolvepath)
+{
+   /* int resolvepath(const char *path, char *buf, size_t bufsiz); */
+   PRINT("sys_resolvepath ( %#lx(%s), %#lx, %lu )", ARG1, (HChar *) ARG1, ARG2,
+         ARG3);
+   PRE_REG_READ3(long, "resolvepath", const char *, path, char *, buf,
+                 vki_size_t, bufsiz);
+
+   PRE_MEM_RASCIIZ("resolvepath(path)", ARG1);
+   PRE_MEM_WRITE("resolvepath(buf)", ARG2, ARG3);
+}
+
+POST(sys_resolvepath)
+{
+   POST_MEM_WRITE(ARG2, RES);
+}
+
+PRE(sys_lwp_mutex_timedlock)
+{
+   /* int lwp_mutex_timedlock(lwp_mutex_t *lp, timespec_t *tsp,
+                              uintptr_t owner); */
+   vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *)ARG1;
+   *flags |= SfMayBlock;
+   PRINT("lwp_mutex_timedlock ( %#lx, %#lx, %#lx )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "lwp_mutex_timedlock", lwp_mutex_t *, lp,
+                 timespec_t *, tsp, uintptr_t, owner);
+
+   PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_flag)", lp->vki_mutex_flag);
+   PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_type)", lp->vki_mutex_type);
+   PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_owner)",
+                   lp->vki_mutex_owner);
+   PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_ownerpid)",
+                   lp->vki_mutex_ownerpid);
+   PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_lockw)", lp->vki_mutex_lockw);
+   /*PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_lockw)",
+                     lp->vki_mutex_lockw);*/
+   PRE_FIELD_READ("lwp_mutex_timedlock(lp->mutex_waiters)",
+                  lp->vki_mutex_waiters);
+   /*PRE_FIELD_WRITE("lwp_mutex_timedlock(lp->mutex_waiters)",
+                     lp->vki_mutex_waiters);*/
+   if (ARG2) {
+      PRE_MEM_READ("lwp_mutex_timedlock(tsp)", ARG2, sizeof(vki_timespec_t));
+      /*PRE_MEM_WRITE("lwp_mutex_timedlock(tsp)", ARG2,
+                      sizeof(vki_timespec_t));*/
+   }
+}
+
+POST(sys_lwp_mutex_timedlock)
+{
+   vki_lwp_mutex_t *lp = (vki_lwp_mutex_t *)ARG1;
+   POST_FIELD_WRITE(lp->vki_mutex_owner);
+   POST_FIELD_WRITE(lp->vki_mutex_ownerpid);
+   POST_FIELD_WRITE(lp->vki_mutex_lockw);
+   POST_FIELD_WRITE(lp->vki_mutex_waiters);
+   if (ARG2)
+      POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
+}
+
+PRE(sys_lwp_rwlock_sys)
+{
+   /* int lwp_rwlock_sys(int subcode, lwp_rwlock_t *rwlp, timespec_t *tsp); */
+   vki_lwp_rwlock_t *rwlp = (vki_lwp_rwlock_t *)ARG2;
+   switch (ARG1 /*subcode*/) {
+   case 0:
+   case 1:
+   case 2:
+   case 3:
+      *flags |= SfMayBlock;
+      switch (ARG1 /*subcode*/) {
+      case 0:
+         PRINT("sys_lwp_rwlock ( %ld, %#lx, %#lx )", ARG1, ARG2, ARG3);
+         PRE_REG_READ3(long, SC2("lwp_rwlock", "rdlock"), int, subcode,
+                       lwp_rwlock_t *, rwlp, timespec_t *, tsp);
+         break;
+      case 1:
+         PRINT("sys_lwp_rwlock ( %ld, %#lx, %#lx )", ARG1, ARG2, ARG3);
+         PRE_REG_READ3(long, SC2("lwp_rwlock", "wrlock"), int, subcode,
+                       lwp_rwlock_t *, rwlp, timespec_t *, tsp);
+         break;
+      case 2:
+         PRINT("sys_lwp_rwlock ( %ld, %#lx )", ARG1, ARG2);
+         PRE_REG_READ2(long, SC2("lwp_rwlock", "tryrdlock"), int, subcode,
+                       lwp_rwlock_t *, rwlp);
+         break;
+      case 3:
+         PRINT("sys_lwp_rwlock ( %ld, %#lx )", ARG1, ARG2);
+         PRE_REG_READ2(long, SC2("lwp_rwlock", "trywrlock"), int, subcode,
+                       lwp_rwlock_t *, rwlp);
+         break;
+      default:
+         vg_assert(0);
+         break;
+      }
+
+      PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_type)", rwlp->vki_rwlock_type);
+      PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_readers)",
+                     rwlp->vki_rwlock_readers);
+      /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->rwlock_readers)",
+                        rwlp->vki_rwlock_readers);*/
+
+      PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_type)",
+                     rwlp->mutex.vki_mutex_type);
+      PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_owner)",
+                      rwlp->mutex.vki_mutex_owner);
+      PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_ownerpid)",
+                      rwlp->mutex.vki_mutex_ownerpid);
+      /* The mutex_lockw member is not really read by the kernel for this
+         syscall but it seems better to mark it that way because when locking
+         an rwlock the associated mutex has to be locked. */
+      PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_lockw)",
+                     rwlp->mutex.vki_mutex_lockw);
+      /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_lockw)",
+                        rwlp->mutex.vki_mutex_lockw);*/
+      PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_waiters)",
+                     rwlp->mutex.vki_mutex_waiters);
+      /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->mutex.mutex_waiters)",
+                        rwlp->mutex.vki_mutex_waiters);*/
+
+      if ((ARG1 == 0 || ARG1 == 1) && ARG3)
+         PRE_MEM_READ("lwp_rwlock(tsp)", ARG3, sizeof(vki_timespec_t));
+      break;
+   case 4:
+      PRINT("sys_lwp_rwlock( %ld, %#lx )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("lwp_rwlock", "unlock"), int, subcode,
+                    lwp_rwlock_t *, rwlp);
+      PRE_FIELD_READ("lwp_rwlock(rwlp->mutex.mutex_type)",
+                     rwlp->mutex.vki_mutex_type);
+      PRE_FIELD_READ("lwp_rwlock(rwlp->rwlock_readers)",
+                     rwlp->vki_rwlock_readers);
+      /*PRE_FIELD_WRITE("lwp_rwlock(rwlp->rwlock_readers)",
+                        rwlp->vki_rwlock_readers);*/
+      break;
+   default:
+      VG_(unimplemented)("Syswrap of the lwp_rwlock_sys call with subcode %ld.",
+                         ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_lwp_rwlock_sys)
+{
+   vki_lwp_rwlock_t *rwlp = (vki_lwp_rwlock_t *)ARG2;
+   switch (ARG1 /*subcode*/) {
+   case 0:
+   case 1:
+   case 2:
+   case 3:
+      POST_FIELD_WRITE(rwlp->vki_rwlock_readers);
+      POST_FIELD_WRITE(rwlp->vki_rwlock_owner);
+      POST_FIELD_WRITE(rwlp->vki_rwlock_ownerpid);
+      POST_FIELD_WRITE(rwlp->mutex.vki_mutex_lockw);
+      POST_FIELD_WRITE(rwlp->mutex.vki_mutex_waiters);
+      break;
+   case 4:
+      POST_FIELD_WRITE(rwlp->vki_rwlock_readers);
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_lwp_sema_timedwait)
+{
+   /* int lwp_sema_timedwait(lwp_sema_t *sema, timespec_t *timeout,
+                             int check_park); */
+   vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
+   *flags |= SfMayBlock;
+   PRINT("sys_lwp_sema_timewait ( %#lx, %#lx, %ld )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "lwp_sema_timedwait", lwp_sema_t *, sema,
+                 timespec_t *, timeout, int, check_park);
+
+   PRE_FIELD_READ("lwp_sema_timedwait(sema->type)", sema->vki_sema_type);
+   PRE_FIELD_READ("lwp_sema_timedwait(sema->count)", sema->vki_sema_count);
+   /*PRE_FIELD_WRITE("lwp_sema_timedwait(sema->count)",
+                     sema->vki_sema_count);*/
+   PRE_FIELD_READ("lwp_sema_timedwait(sema->waiters)", sema->vki_sema_waiters);
+   /*PRE_FIELD_WRITE("lwp_sema_timedwait(sema->waiters)",
+                     sema->vki_sema_waiters);*/
+   if (ARG2) {
+      PRE_MEM_READ("lwp_sema_timedwait(timeout)", ARG2,
+                   sizeof(vki_timespec_t));
+      /*PRE_MEM_WRITE("lwp_sema_timedwait(timeout)", ARG2,
+                      sizeof(vki_timespec_t));*/
+   }
+}
+
+POST(sys_lwp_sema_timedwait)
+{
+   vki_lwp_sema_t *sema = (vki_lwp_sema_t*)ARG1;
+   POST_FIELD_WRITE(sema->vki_sema_count);
+   POST_FIELD_WRITE(sema->vki_sema_waiters);
+   if (ARG2)
+      POST_MEM_WRITE(ARG2, sizeof(vki_timespec_t));
+}
+
+PRE(sys_zone)
+{
+   /* Kernel: long zone(int cmd, void *arg1, void *arg2, void *arg3,
+                        void *arg4);
+    */
+   switch (ARG1 /*cmd*/) {
+   case VKI_ZONE_CREATE:
+      /* Libc: zoneid_t zone_create(const char *name, const char *root,
+                                    const struct priv_set *privs,
+                                    const char *rctls, size_t rctlsz,
+                                    const char *zfs, size_t zfssz,
+                                    int *extended_error, int match,
+                                    int doi, const bslabel_t *label,
+                                    int flags);
+        Kernel: zoneid_t zone_create(zone_def *zd);
+       */
+      PRINT("sys_zone ( %ld, %#lx )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("zone", "create"), int, cmd,
+                    vki_zone_def *, zd);
+
+      vki_zone_def *zd = (vki_zone_def *) ARG2;
+      PRE_FIELD_READ("zone(zd.zone_name)", zd->zone_name);
+      PRE_FIELD_READ("zone(zd.zone_root)", zd->zone_root);
+      PRE_FIELD_READ("zone(zd.zone_privs)", zd->zone_privs);
+      PRE_FIELD_READ("zone(zd.zone_privssz)", zd->zone_privssz);
+      PRE_FIELD_READ("zone(zd.rctlbuf)", zd->rctlbuf);
+      PRE_FIELD_READ("zone(zd.rctlbufsz)", zd->rctlbufsz);
+      PRE_FIELD_READ("zone(zd.zfsbuf)", zd->zfsbuf);
+      PRE_FIELD_READ("zone(zd.zfsbufsz)", zd->zfsbufsz);
+      PRE_FIELD_READ("zone(zd.extended_error)", zd->extended_error);
+      PRE_FIELD_READ("zone(zd.match)", zd->match);
+      PRE_FIELD_READ("zone(zd.doi)", zd->doi);
+      PRE_FIELD_READ("zone(zd.label)", zd->label);
+      PRE_FIELD_READ("zone(zd.flags)", zd->flags);
+
+      if (ML_(safe_to_deref((void *)ARG2, sizeof(vki_zone_def)))) {
+         if (zd->zone_name)
+            PRE_MEM_RASCIIZ("zone(zd.zone_name)", (Addr) zd->zone_name);
+         if (zd->zone_root)
+            PRE_MEM_RASCIIZ("zone(zd.zone_root)", (Addr) zd->zone_root);
+         PRE_MEM_READ("zone(zd.zone_privs)", (Addr) zd->zone_privs,
+                      zd->zone_privssz);
+         PRE_MEM_READ("zone(zd.rctlbuf)", (Addr) zd->rctlbuf,
+                      zd->rctlbufsz);
+         PRE_MEM_READ("zone(zd.zfsbuf)",
+                      (Addr) zd->zfsbuf, zd->zfsbufsz);
+         if (zd->label)
+            PRE_MEM_READ("zone(zd.label)", (Addr) zd->label,
+                         sizeof(vki_bslabel_t));
+      }
+      break;
+   case VKI_ZONE_DESTROY:
+      /* Libc: int zone_destroy(zoneid_t zoneid); */
+      PRINT("sys_zone ( %ld, %ld )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("zone", "destroy"), int, cmd,
+                    vki_zoneid_t, zoneid);
+      break;
+   case VKI_ZONE_GETATTR:
+      /* Libc: ssize_t zone_getattr(zoneid_t zoneid, int attr,
+                                    void *valp, size_t size);
+       */
+      PRINT("sys_zone ( %ld, %ld, %ld, %#lx, %ld )",
+            ARG1, ARG2, ARG3, ARG4, ARG5);
+      PRE_REG_READ5(long, SC2("zone", "getattr"), int, cmd,
+                    vki_zoneid_t, zoneid, int, attr, void *, valp,
+                    vki_size_t, size);
+      PRE_MEM_WRITE("zone(valp)", ARG4, ARG5);
+      break;
+   case VKI_ZONE_ENTER:
+      /* Libc: int zone_enter(zoneid_t zoneid); */
+      PRINT("sys_zone ( %ld, %ld )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("zone", "enter"), int, cmd,
+                    vki_zoneid_t, zoneid);
+      break;
+   case VKI_ZONE_LIST:
+      /* Libc: int zone_list(zoneid_t *zonelist, uint_t *numzones); */
+      PRINT("sys_zone ( %ld, %#lx, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("zone", "list"), int, cmd,
+                    vki_zoneid_t *, zonelist, vki_uint_t *, numzones);
+
+      PRE_MEM_WRITE("zone(numzones)", ARG3, sizeof(vki_uint_t));
+
+      if (ML_(safe_to_deref((void *) ARG3, sizeof(vki_uint_t)))) {
+         if (ARG2)
+            PRE_MEM_WRITE("zone(zonelist)", ARG2,
+                          *(vki_uint_t *) ARG3 * sizeof(vki_zoneid_t));
+      }
+      break;
+   case VKI_ZONE_SHUTDOWN:
+      /* Libc: int zone_shutdown(zoneid_t zoneid); */
+      PRINT("sys_zone ( %ld, %ld )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("zone", "shutdown"), int, cmd,
+                    vki_zoneid_t, zoneid);
+      break;
+   case VKI_ZONE_LOOKUP:
+      /* Libc: zoneid_t zone_lookup(const char *name); */
+      PRINT("sys_zone ( %ld, %#lx )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("zone", "lookup"), int, cmd,
+                    const char *, name);
+      if (ARG2)
+         PRE_MEM_RASCIIZ("zone(name)", ARG2);
+      break;
+   case VKI_ZONE_BOOT:
+      /* Libc: int zone_boot(zoneid_t zoneid); */
+      PRINT("sys_zone ( %ld, %ld )", ARG1, ARG2);
+      PRE_REG_READ2(long, SC2("zone", "boot"), int, cmd,
+                    vki_zoneid_t, zoneid);
+      break;
+   case VKI_ZONE_SETATTR:
+      /* Libc: int zone_setattr(zoneid_t zoneid, int attr, void *valp,
+                                size_t size);
+       */
+      PRINT("sys_zone ( %ld, %ld, %ld, %#lx, %ld )",
+            ARG1, ARG2, ARG3, ARG4, ARG5);
+      PRE_REG_READ5(long, SC2("zone", "setattr"), int, cmd,
+                    vki_zoneid_t, zoneid, int, attr, void *, valp,
+                    vki_size_t, size);
+      PRE_MEM_READ("zone(valp)", ARG4, ARG5);
+      break;
+   case VKI_ZONE_ADD_DATALINK:
+      /* Libc: int zone_add_datalink(zoneid_t zoneid,
+                                     datalink_id_t linkid);
+       */
+      PRINT("sys_zone ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("zone", "add_datalink"), int, cmd,
+                    vki_zoneid_t, zoneid, vki_datalink_id_t, linkid);
+      break;
+   case VKI_ZONE_DEL_DATALINK:
+      /* Libc: int zone_remove_datalink(zoneid_t zoneid,
+                                        datalink_id_t linkid);
+       */
+      PRINT("sys_zone ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("zone", "del_datalink"), int, cmd,
+                    vki_zoneid_t, zoneid, vki_datalink_id_t, linkid);
+      break;
+   case VKI_ZONE_CHECK_DATALINK:
+      /* Libc: int zone_check_datalink(zoneid_t *zoneidp,
+                                       datalink_id_t linkid);
+      */
+      PRINT("sys_zone ( %ld, %#lx, %ld )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("zone", "check_datalink"), int, cmd,
+                    vki_zoneid_t *, zoneidp, vki_datalink_id_t, linkid);
+      PRE_MEM_WRITE("zone(zoneidp)", ARG2, sizeof(vki_zoneid_t));
+      break;
+   case VKI_ZONE_LIST_DATALINK:
+      /* Libc: int zone_list_datalink(zoneid_t zoneid, int *dlnump,
+                                      datalink_id_t *linkids);
+       */
+      PRINT("sys_zone ( %ld, %ld, %#lx, %#lx )", ARG1, ARG2, ARG3, ARG4);
+      PRE_REG_READ4(long, SC2("zone", "list_datalink"), int, cmd,
+                    vki_zoneid_t, zoneid, int *, dlnump,
+                    vki_datalink_id_t *, linkids);
+
+      PRE_MEM_WRITE("zone(dlnump)", ARG3, sizeof(int));
+      if (ML_(safe_to_deref((void *) ARG3, sizeof(int)))) {
+         if (ARG4)
+            PRE_MEM_WRITE("zone(linkids)", ARG4,
+                          *(int *) ARG3 * sizeof(vki_datalink_id_t));
+      }
+      break;
+#if defined(SOLARIS_ZONE_DEFUNCT)
+   case VKI_ZONE_LIST_DEFUNCT:
+      /* Libc: int zone_list_defunct(uint64_t *uniqidlist,
+                                     uint_t *numzones);
+       */
+      PRINT("sys_zone ( %ld, %#lx, %#lx )", ARG1, ARG2, ARG3);
+      PRE_REG_READ3(long, SC2("zone", "list_defunct"), int, cmd,
+                    vki_uint64_t *, uniqidlist, vki_uint_t *, numzones);
+
+      PRE_MEM_WRITE("zone(numzones)", ARG3, sizeof(vki_uint_t));
+
+      if (ML_(safe_to_deref((void *) ARG3, sizeof(vki_uint_t)))) {
+         if (ARG2)
+            PRE_MEM_WRITE("zone(uniqidlist)", ARG2,
+                          *(vki_uint_t *) ARG3 * sizeof(vki_uint64_t));
+      }
+      break;
+   case VKI_ZONE_GETATTR_DEFUNCT:
+      /* Libc: ssize_t zone_getattr_defunct(uint64_t uniqid, int attr,
+                                            void *valp, size_t size);
+         Kernel: ssize_t zone_getattr_defunct(uint64_t *uniqid, int attr,
+                                              void *valp, size_t size);
+       */
+      PRINT("sys_zone ( %ld, %#lx, %ld, %#lx, %ld )",
+            ARG1, ARG2, ARG3, ARG4, ARG5);
+      PRE_REG_READ5(long, SC2("zone", "getattr_defunct"), int, cmd,
+                    vki_uint64_t *, uniqid, int, attr,
+                    void *, valp, vki_size_t, size);
+
+      PRE_MEM_READ("zone(uniqid)", ARG2, sizeof(vki_uint64_t));
+      PRE_MEM_WRITE("zone(valp)", ARG4, ARG5);
+      break;
+#endif /* SOLARIS_ZONE_DEFUNCT */
+   default:
+      VG_(unimplemented)("Syswrap of the zone call with cmd %ld.", ARG1);
+      /*NOTREACHED*/
+      break;
+   }
+}
+
+POST(sys_zone)
+{
+   switch (ARG1 /*cmd*/) {
+   case VKI_ZONE_CREATE:
+   case VKI_ZONE_DESTROY:
+      break;
+   case VKI_ZONE_GETATTR:
+      POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
+      break;
+   case VKI_ZONE_ENTER:
+      break;
+   case VKI_ZONE_LIST:
+      POST_MEM_WRITE(ARG2, *(vki_uint_t *) ARG3 * sizeof(vki_zoneid_t));
+      break;
+   case VKI_ZONE_SHUTDOWN:
+   case VKI_ZONE_LOOKUP:
+   case VKI_ZONE_BOOT:
+   case VKI_ZONE_SETATTR:
+   case VKI_ZONE_ADD_DATALINK:
+   case VKI_ZONE_DEL_DATALINK:
+      break;
+   case VKI_ZONE_CHECK_DATALINK:
+      POST_MEM_WRITE(ARG2, sizeof(vki_zoneid_t));
+      break;
+   case VKI_ZONE_LIST_DATALINK:
+      POST_MEM_WRITE(ARG4, *(int *) ARG3 * sizeof(vki_datalink_id_t));
+      break;
+#if defined(SOLARIS_ZONE_DEFUNCT)
+   case VKI_ZONE_LIST_DEFUNCT:
+      POST_MEM_WRITE(ARG2, *(vki_uint_t *) ARG3 * sizeof(vki_uint64_t));
+      break;
+   case VKI_ZONE_GETATTR_DEFUNCT:
+      POST_MEM_WRITE(ARG4, MIN(RES, ARG5));
+      break;
+#endif /* SOLARIS_ZONE_DEFUNCT */
+   default:
+      vg_assert(0);
+      break;
+   }
+}
+
+PRE(sys_getcwd)
+{
+   /* int getcwd(char *buf, size_t size); */
+   /* Note: Generic getcwd() syswrap can't be used because it expects
+      a different return value. */
+   PRINT("sys_getcwd ( %#lx, %lu )", ARG1, ARG2);
+   PRE_REG_READ2(long, "getcwd", char *, buf, vki_size_t, size);
+   PRE_MEM_WRITE("getcwd(buf)", ARG1, ARG2);
+}
+
+POST(sys_getcwd)
+{
+   POST_MEM_WRITE(ARG1, VG_(strlen)((HChar*)ARG1) + 1);
+}
+
+PRE(sys_so_socket)
+{
+   /* int so_socket(int family, int type, int protocol, char *devpath,
+                    int version); */
+   PRINT("sys_so_socket ( %ld, %ld, %ld, %#lx(%s), %ld)", ARG1, ARG2, ARG3,
+         ARG4, (HChar *) ARG4, ARG5);
+   PRE_REG_READ5(long, "socket", int, family, int, type, int, protocol,
+                 char *, devpath, int, version);
+   if (ARG4)
+      PRE_MEM_RASCIIZ("socket(devpath)", ARG4);
+}
+
+POST(sys_so_socket)
+{
+   SysRes r;
+   r = ML_(generic_POST_sys_socket)(tid, VG_(mk_SysRes_Success)(RES));
+   SET_STATUS_from_SysRes(r);
+}
+
+PRE(sys_so_socketpair)
+{
+   /* int so_socketpair(int sv[2]); */
+   /* This syscall is used to connect two already-created sockets. */
+   PRINT("sys_so_socketpair ( %#lx )", ARG1);
+   PRE_REG_READ1(long, "socketpair", int *, sv);
+   PRE_MEM_READ("socketpair(sv)", ARG1, 2 * sizeof(int));
+   /*PRE_MEM_WRITE("socketpair(sv)", ARG1, 2 * sizeof(int));*/
+   if (ML_(safe_to_deref)((void*)ARG1, 2 * sizeof(int))) {
+      int *fds = (int*)ARG1;
+      if (!ML_(fd_allowed)(fds[0], "socketpair", tid, False))
+         SET_STATUS_Failure(VKI_EBADF);
+      else if (!ML_(fd_allowed)(fds[1], "socketpair", tid, False))
+         SET_STATUS_Failure(VKI_EBADF);
+   }
+}
+
+POST(sys_so_socketpair)
+{
+   /* The kernel can return new file descriptors; in that case we have to
+      validate them. */
+   int *fds = (int*)ARG1;
+   POST_MEM_WRITE(ARG1, 2 * sizeof(int));
+   if (!ML_(fd_allowed)(fds[0], "socketpair", tid, True))
+      SET_STATUS_Failure(VKI_EMFILE);
+   if (!ML_(fd_allowed)(fds[1], "socketpair", tid, True))
+      SET_STATUS_Failure(VKI_EMFILE);
+   if (FAILURE) {
+      /* One or both of the file descriptors weren't allowed, close newly
+         created file descriptors but don't close the already recorded
+         ones. */
+      if (!ML_(fd_recorded)(fds[0]))
+         VG_(close)(fds[0]);
+      if (!ML_(fd_recorded)(fds[1]))
+         VG_(close)(fds[1]);
+   }
+   else if (VG_(clo_track_fds)) {
+      /* Everything went better than expected; record the newly created file
+         descriptors.  Note: if the kernel actually returns the original file
+         descriptors, ML_(record_fd_open_nameless) notices that these file
+         descriptors have already been recorded. */
+      ML_(record_fd_open_nameless)(tid, fds[0]);
+      ML_(record_fd_open_nameless)(tid, fds[1]);
+   }
+}
+
+PRE(sys_bind)
+{
+   /* int bind(int s, struct sockaddr *name, socklen_t namelen,
+               int version); */
+   PRINT("sys_bind ( %ld, %#lx, %lu, %ld )",ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "bind", int, s, struct sockaddr *, name,
+                 vki_socklen_t, namelen, int, version);
+   ML_(generic_PRE_sys_bind)(tid, ARG1, ARG2, ARG3);
+}
+
+PRE(sys_listen)
+{
+   /* int listen(int s, int backlog, int version); */
+   PRINT("sys_listen ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "listen", int, s, int, backlog, int, version);
+}
+
+PRE(sys_accept)
+{
+#if defined(SOLARIS_NEW_ACCEPT_SYSCALL)
+   /* int accept(int s, struct sockaddr *addr, socklen_t *addrlen,
+                 int version, int flags); */
+   *flags |= SfMayBlock;
+   PRINT("sys_accept ( %ld, %#lx, %#lx, %ld, %ld )", ARG1, ARG2, ARG3, ARG4,
+         ARG5);
+   PRE_REG_READ5(long, "accept", int, s, struct sockaddr *, addr,
+                 socklen_t *, addrlen, int, version, int, flags);
+#else
+   /* int accept(int s, struct sockaddr *addr, socklen_t *addrlen,
+                 int version); */
+   *flags |= SfMayBlock;
+   PRINT("sys_accept ( %ld, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "accept", int, s, struct sockaddr *, addr,
+                 socklen_t *, addrlen, int, version);
+#endif /* SOLARIS_NEW_ACCEPT_SYSCALL */
+   ML_(generic_PRE_sys_accept)(tid, ARG1, ARG2, ARG3);
+}
+
+POST(sys_accept)
+{
+   SysRes r;
+   r = ML_(generic_POST_sys_accept)(tid, VG_(mk_SysRes_Success)(RES),
+                                    ARG1, ARG2, ARG3);
+   SET_STATUS_from_SysRes(r);
+}
+
+PRE(sys_connect)
+{
+   /* int connect(int s, struct sockaddr *name, socklen_t namelen,
+                  int version); */
+   *flags |= SfMayBlock;
+   PRINT("sys_connect ( %ld, %#lx, %lu, %ld )",ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "connect", int, s, struct sockaddr *, name,
+                 vki_socklen_t, namelen, int, version);
+   ML_(generic_PRE_sys_connect)(tid, ARG1, ARG2, ARG3);
+}
+
+PRE(sys_shutdown)
+{
+   /* Kernel: int shutdown(int sock, int how, int version);
+      Libc:   int shutdown(int sock, int how);
+    */
+   *flags |= SfMayBlock;
+   PRINT("sys_shutdown ( %ld, %ld, %ld )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(int, "shutdown", int, sock, int, how, int, version);
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "shutdown", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_recv)
+{
+   /* ssize_t recv(int s, void *buf, size_t len, int flags); */
+   *flags |= SfMayBlock;
+   PRINT("sys_recv ( %ld, %#lx, %lu, %ld )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "recv", int, s, void *, buf, vki_size_t, len,
+                 int, flags);
+   ML_(generic_PRE_sys_recv)(tid, ARG1, ARG2, ARG3);
+}
+
+POST(sys_recv)
+{
+   ML_(generic_POST_sys_recv)(tid, RES, ARG1, ARG2, ARG3);
+}
+
+PRE(sys_recvfrom)
+{
+   /* ssize_t recvfrom(int s, void *buf, size_t len, int flags,
+                       struct sockaddr *from, socklen_t *fromlen); */
+   *flags |= SfMayBlock;
+   PRINT("sys_recvfrom ( %ld, %#lx, %lu, %ld, %#lx, %#lx )", ARG1, ARG2, ARG3,
+         ARG4, ARG5, ARG6);
+   PRE_REG_READ6(long, "recvfrom", int, s, void *, buf, vki_size_t, len,
+                 int, flags, struct sockaddr *, from, socklen_t *, fromlen);
+   ML_(generic_PRE_sys_recvfrom)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
+}
+
+POST(sys_recvfrom)
+{
+   ML_(generic_POST_sys_recvfrom)(tid, VG_(mk_SysRes_Success)(RES),
+                                  ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
+}
+
+PRE(sys_recvmsg)
+{
+   /* ssize_t recvmsg(int s, struct msghdr *msg, int flags); */
+   *flags |= SfMayBlock;
+   PRINT("sys_recvmsg ( %ld, %#lx, %ld )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "recvmsg", int, s, struct msghdr *, msg, int, flags);
+   ML_(generic_PRE_sys_recvmsg)(tid, "msg", (struct vki_msghdr*)ARG2);
+}
+
+POST(sys_recvmsg)
+{
+   ML_(generic_POST_sys_recvmsg)(tid, "msg", (struct vki_msghdr*)ARG2, RES);
+}
+
+PRE(sys_send)
+{
+   /* ssize_t send(int s, const void *msg, size_t len, int flags); */
+   *flags |= SfMayBlock;
+   PRINT("sys_send ( %ld, %#lx, %lu, %ld )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "send", int, s, const void *, msg, vki_size_t, len,
+                 int, flags);
+   ML_(generic_PRE_sys_send)(tid, ARG1, ARG2, ARG3);
+}
+
+PRE(sys_sendmsg)
+{
+   /* ssize_t sendmsg(int s, const struct msghdr *msg, int flags); */
+   *flags |= SfMayBlock;
+   PRINT("sys_sendmsg ( %ld, %#lx, %ld )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "sendmsg", int, s, const struct msghdr *, msg,
+                 int, flags);
+   ML_(generic_PRE_sys_sendmsg)(tid, "msg", (struct vki_msghdr*)ARG2);
+}
+
+PRE(sys_sendto)
+{
+   /* ssize_t sendto(int s, const void *msg, size_t len, int flags,
+                     const struct sockaddr *to, int tolen); */
+   *flags |= SfMayBlock;
+   PRINT("sys_sendto ( %ld, %#lx, %lu, %ld, %#lx, %ld )", ARG1, ARG2, ARG3,
+         ARG4, ARG5, ARG6);
+   PRE_REG_READ6(long, "sendto", int, s, const void *, msg, vki_size_t, len,
+                 int, flags, const struct sockaddr *, to, int, tolen);
+   ML_(generic_PRE_sys_sendto)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, ARG6);
+}
+
+PRE(sys_getpeername)
+{
+   /* Kernel: int getpeername(int s, struct sockaddr *name,
+                              socklen_t *namelen, int version);
+      Libc:   int getpeername(int s, struct sockaddr *name,
+                              socklen_t *namelen);
+    */
+   *flags |= SfMayBlock;
+   PRINT("sys_getpeername ( %ld, %#lx, %#lx, %ld )",
+         ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "getpeername", int, s, struct vki_sockaddr *, name,
+                 vki_socklen_t *, namelen, int, version);
+   ML_(buf_and_len_pre_check)(tid, ARG2, ARG3, "getpeername(name)",
+                              "getpeername(namelen)");
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "getpeername", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_getpeername)
+{
+   ML_(buf_and_len_post_check)(tid, VG_(mk_SysRes_Success)(RES),
+                               ARG2, ARG3, "getpeername(namelen)");
+}
+
+PRE(sys_getsockname)
+{
+   /* int getsockname(int s, struct sockaddr *name, socklen_t *namelen,
+                      int version); */
+   PRINT("sys_getsockname ( %ld, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "getsockname", int, s, struct sockaddr *, name,
+                 socklen_t *, namelen, int, version);
+   ML_(generic_PRE_sys_getsockname)(tid, ARG1, ARG2, ARG3);
+}
+
+POST(sys_getsockname)
+{
+   ML_(generic_POST_sys_getsockname)(tid, VG_(mk_SysRes_Success)(RES),
+                                     ARG1, ARG2, ARG3);
+}
+
+PRE(sys_getsockopt)
+{
+   /* int getsockopt(int s, int level, int optname, void *optval,
+                     socklen_t *optlen, int version); */
+   PRINT("sys_getsockopt ( %ld, %ld, %ld, %#lx, %#lx, %ld )", ARG1, ARG2,
+         ARG3, ARG4, ARG5, ARG6);
+   PRE_REG_READ6(long, "getsockopt", int, s, int, level, int, optname,
+                 void *, optval, socklen_t *, optlen, int, version);
+   if (ARG4)
+      ML_(buf_and_len_pre_check)(tid, ARG4, ARG5, "getsockopt(optval)",
+                                 "getsockopt(optlen)");
+}
+
+POST(sys_getsockopt)
+{
+   if (ARG4)
+      ML_(buf_and_len_post_check)(tid, VG_(mk_SysRes_Success)(RES), ARG4,
+                                  ARG5, "getsockopt(optlen_out)");
+}
+
+PRE(sys_setsockopt)
+{
+   /* int setsockopt(int s, int level, int optname, const void *optval,
+                     socklen_t optlen, int version); */
+   PRINT("sys_setsockopt ( %ld, %ld, %ld, %#lx, %lu, %ld )", ARG1, ARG2, ARG3,
+         ARG4, ARG5, ARG6);
+   PRE_REG_READ6(long, "setsockopt", int, s, int, level, int, optname,
+                 const void *, optval, vki_socklen_t, optlen, int, version);
+   ML_(generic_PRE_sys_setsockopt)(tid, ARG1, ARG2, ARG3, ARG4, ARG5);
+}
+
+PRE(sys_lwp_mutex_register)
+{
+   /* int lwp_mutex_register(lwp_mutex_t *mp, caddr_t uaddr); */
+   vki_lwp_mutex_t *mp = (vki_lwp_mutex_t*)ARG1;
+   PRINT("sys_lwp_mutex_register ( %#lx, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "lwp_mutex_register", lwp_mutex_t *, mp,
+                 void *, uaddr);
+   PRE_FIELD_READ("lwp_mutex_register(mp->mutex_type)", mp->vki_mutex_type);
+}
+
+PRE(sys_uucopy)
+{
+   /* int uucopy(const void *s1, void *s2, size_t n); */
+   PRINT("sys_uucopy ( %#lx, %#lx, %lu )", ARG1, ARG2, ARG3);
+   PRE_REG_READ3(long, "uucopy", const void *, s1, void *, s2, vki_size_t, n);
+
+   /* Stay away from V segments. */
+   if (!ML_(valid_client_addr)(ARG1, ARG3, tid, "uucopy(s1)")) {
+      SET_STATUS_Failure(VKI_EFAULT);
+   }
+   if (!ML_(valid_client_addr)(ARG2, ARG3, tid, "uucopy(s2)")) {
+      SET_STATUS_Failure(VKI_EFAULT);
+   }
+
+   if (FAILURE)
+      return;
+
+   /* XXX This is actually incorrect; we should be able to copy undefined
+      values through to their new destination. */
+   PRE_MEM_READ("uucopy(s1)", ARG1, ARG3);
+   PRE_MEM_WRITE("uucopy(s2)", ARG2, ARG3);
+}
+
+POST(sys_uucopy)
+{
+   POST_MEM_WRITE(ARG2, ARG3);
+}
+
+PRE(sys_umount2)
+{
+   /* int umount2(const char *file, int mflag); */
+   *flags |= SfMayBlock;
+   PRINT("sys_umount2 ( %#lx(%s), %ld )", ARG1, (HChar *) ARG1, ARG2);
+   PRE_REG_READ2(long, "umount2", const char *, file, int, mflag);
+   PRE_MEM_RASCIIZ("umount2(file)", ARG1);
+}
+
+PRE(fast_gethrtime)
+{
+   PRINT("fast_gethrtime ( )");
+   PRE_REG_READ0(long, "gethrtime");
+}
+
+PRE(fast_gethrvtime)
+{
+   PRINT("fast_gethrvtime ( )");
+   PRE_REG_READ0(long, "gethrvtime");
+}
+
+PRE(fast_gethrestime)
+{
+   /* Used by gettimeofday(3C). */
+   PRINT("fast_gethrestime ( )");
+   PRE_REG_READ0(long, "gethrestime");
+}
+
+#if defined(SOLARIS_GETHRT_FASTTRAP)
+PRE(fast_gethrt)
+{
+   /* Used by gethrtime(3C) when the tsc & tscp HWCAPs are present. */
+   PRINT("fast_gethrt ( )");
+   PRE_REG_READ0(long, "gethrt");
+}
+
+POST(fast_gethrt)
+{
+   if (RES == 0)
+      return;
+
+   /* The returned address points to a memory mapping shared between the
+      kernel and the process.  This was pre-arranged during process address
+      space initialization in the kernel.  At startup Valgrind created a
+      segment for this mapping and categorized it as Valgrind-owned anonymous
+      memory.  The size of this mapping varies among Solaris versions but
+      should be page aligned. */
+   const NSegment *seg = VG_(am_find_anon_segment)(RES);
+   vg_assert(seg != NULL);
+   vg_assert(seg->start == RES);
+   vg_assert(VG_IS_PAGE_ALIGNED(seg->start));
+   vg_assert(VG_IS_PAGE_ALIGNED(seg->end + 1));
+   SizeT size = seg->end - seg->start + 1;
+   vg_assert(size > 0);
+
+   if (!VG_(am_is_valid_for_client)(RES, size, VKI_PROT_READ)) {
+      Bool change_ownership_v_c_OK
+         = VG_(am_change_ownership_v_to_c)(RES, size);
+      vg_assert(change_ownership_v_c_OK);
+
+      /* Tell the tool about the just-discovered mapping. */
+      VG_TRACK(new_mem_startup,
+               RES, size,
+               True  /* readable? */,
+               False /* writable? */,
+               False /* executable? */,
+               0     /* di_handle */);
+   }
+}
+#endif /* SOLARIS_GETHRT_FASTTRAP */
+
+#if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
+PRE(fast_getzoneoffset)
+{
+   /* Returns the kernel's time zone offset data. */
+   PRINT("fast_getzoneoffset ( )");
+   PRE_REG_READ0(long, "get_zone_offset");
+}
+
+POST(fast_getzoneoffset)
+{
+   if (RES == 0)
+      return;
+
+   /* The returned address points to a memory mapping shared between the
+      kernel and the process.  This was pre-arranged during process address
+      space initialization in the kernel.  At startup Valgrind created a
+      segment for this mapping and categorized it as Valgrind-owned anonymous
+      memory.  The size of this mapping varies among Solaris versions but
+      should be page aligned. */
+   const NSegment *seg = VG_(am_find_anon_segment)(RES);
+   vg_assert(seg != NULL);
+   vg_assert(seg->start == RES);
+   vg_assert(VG_IS_PAGE_ALIGNED(seg->start));
+   vg_assert(VG_IS_PAGE_ALIGNED(seg->end + 1));
+   SizeT size = seg->end - seg->start + 1;
+   vg_assert(size > 0);
+
+   if (!VG_(am_is_valid_for_client)(RES, size, VKI_PROT_READ)) {
+      Bool change_ownership_v_c_OK
+         = VG_(am_change_ownership_v_to_c)(RES, size);
+      vg_assert(change_ownership_v_c_OK);
+
+      /* Tell the tool about the just-discovered mapping. */
+      VG_TRACK(new_mem_startup,
+               RES, size,
+               True  /* readable? */,
+               False /* writable? */,
+               False /* executable? */,
+               0     /* di_handle */);
+   }
+}
+#endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
+
+#undef PRE
+#undef POST
+
+/* ---------------------------------------------------------------------
+   The Solaris syscall table
+   ------------------------------------------------------------------ */
+
+/* Add a Solaris-specific, arch-independent wrapper to a syscall table. */
+#define SOLX_(sysno, name) \
+   WRAPPER_ENTRY_X_(solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
+#define SOLXY(sysno, name) \
+   WRAPPER_ENTRY_XY(solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
+
+#if defined(VGP_x86_solaris)
+/* Add an x86-solaris specific wrapper to a syscall table. */
+#define PLAX_(sysno, name) \
+   WRAPPER_ENTRY_X_(x86_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
+#define PLAXY(sysno, name) \
+   WRAPPER_ENTRY_XY(x86_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
+
+#elif defined(VGP_amd64_solaris)
+/* Add an amd64-solaris specific wrapper to a syscall table. */
+#define PLAX_(sysno, name) \
+   WRAPPER_ENTRY_X_(amd64_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
+#define PLAXY(sysno, name) \
+   WRAPPER_ENTRY_XY(amd64_solaris, VG_SOLARIS_SYSNO_INDEX(sysno), name)
+
+#else
+#  error "Unknown platform"
+#endif
+
+/*
+   GEN   : handlers are in syswrap-generic.c
+   SOL   : handlers are in this file
+      X_ : PRE handler only
+      XY : PRE and POST handlers
+*/
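+
+/* Illustration (assuming the usual WRAPPER_ENTRY_* expansion from
+   priv_syswrap headers): an entry such as SOLXY(__NR_getcwd, sys_getcwd)
+   places the PRE(sys_getcwd) and POST(sys_getcwd) handlers defined above at
+   index VG_SOLARIS_SYSNO_INDEX(__NR_getcwd), whereas a SOLX_ entry installs
+   a PRE handler only.  Syscall numbers with no entry stay zero-initialized,
+   so ML_(get_solaris_syscall_entry) below reports them as having no
+   wrapper. */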
+
+static SyscallTableEntry syscall_table[] = {
+   SOLX_(__NR_exit,                 sys_exit),                  /*   1 */
+#if defined(SOLARIS_SPAWN_SYSCALL)
+   SOLX_(__NR_spawn,                sys_spawn),                 /*   2 */
+#endif /* SOLARIS_SPAWN_SYSCALL */
+   GENXY(__NR_read,                 sys_read),                  /*   3 */
+   GENX_(__NR_write,                sys_write),                 /*   4 */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   SOLXY(__NR_open,                 sys_open),                  /*   5 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+   SOLXY(__NR_close,                sys_close),                 /*   6 */
+   SOLX_(__NR_linkat,               sys_linkat),                /*   7 */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   GENX_(__NR_link,                 sys_link),                  /*   9 */
+   GENX_(__NR_unlink,               sys_unlink),                /*  10 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+   SOLX_(__NR_symlinkat,            sys_symlinkat),             /*  11 */
+   GENX_(__NR_chdir,                sys_chdir),                 /*  12 */
+   SOLX_(__NR_time,                 sys_time),                  /*  13 */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   GENX_(__NR_chmod,                sys_chmod),                 /*  15 */
+   GENX_(__NR_chown,                sys_chown),                 /*  16 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+   SOLX_(__NR_brk,                  sys_brk),                   /*  17 */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   SOLXY(__NR_stat,                 sys_stat),                  /*  18 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+   SOLX_(__NR_lseek,                sys_lseek),                 /*  19 */
+   GENX_(__NR_getpid,               sys_getpid),                /*  20 */
+   SOLXY(__NR_mount,                sys_mount),                 /*  21 */
+   SOLXY(__NR_readlinkat,           sys_readlinkat),            /*  22 */
+   GENX_(__NR_setuid,               sys_setuid),                /*  23 */
+   GENX_(__NR_getuid,               sys_getuid),                /*  24 */
+   SOLX_(__NR_stime,                sys_stime),                 /*  25 */
+   GENX_(__NR_alarm,                sys_alarm),                 /*  27 */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   SOLXY(__NR_fstat,                sys_fstat),                 /*  28 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+   GENX_(__NR_pause,                sys_pause),                 /*  29 */
+#if defined(SOLARIS_FREALPATHAT_SYSCALL)
+   SOLXY(__NR_frealpathat,          sys_frealpathat),           /*  30 */
+#endif /* SOLARIS_FREALPATHAT_SYSCALL */
+   SOLX_(__NR_stty,                 sys_stty),                  /*  31 */
+   SOLXY(__NR_gtty,                 sys_gtty),                  /*  32 */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   GENX_(__NR_access,               sys_access),                /*  33 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+   GENX_(__NR_kill,                 sys_kill),                  /*  37 */
+   SOLX_(__NR_pgrpsys,              sys_pgrpsys),               /*  39 */
+   SOLXY(__NR_pipe,                 sys_pipe),                  /*  42 */
+   GENXY(__NR_times,                sys_times),                 /*  43 */
+   SOLX_(__NR_faccessat,            sys_faccessat),             /*  45 */
+   GENX_(__NR_setgid,               sys_setgid),                /*  46 */
+   GENX_(__NR_getgid,               sys_getgid),                /*  47 */
+   SOLXY(__NR_mknodat,              sys_mknodat),               /*  48 */
+   SOLXY(__NR_sysi86,               sys_sysi86),                /*  50 */
+   SOLXY(__NR_shmsys,               sys_shmsys),                /*  52 */
+   SOLXY(__NR_semsys,               sys_semsys),                /*  53 */
+   SOLXY(__NR_ioctl,                sys_ioctl),                 /*  54 */
+   SOLX_(__NR_fchownat,             sys_fchownat),              /*  56 */
+   SOLX_(__NR_fdsync,               sys_fdsync),                /*  58 */
+   SOLX_(__NR_execve,               sys_execve),                /*  59 */
+   GENX_(__NR_umask,                sys_umask),                 /*  60 */
+   GENX_(__NR_chroot,               sys_chroot),                /*  61 */
+   SOLXY(__NR_fcntl,                sys_fcntl),                 /*  62 */
+   SOLX_(__NR_renameat,             sys_renameat),              /*  64 */
+   SOLX_(__NR_unlinkat,             sys_unlinkat),              /*  65 */
+   SOLXY(__NR_fstatat,              sys_fstatat),               /*  66 */
+#if defined(VGP_x86_solaris)
+   PLAXY(__NR_fstatat64,            sys_fstatat64),             /*  67 */
+#endif /* VGP_x86_solaris */
+   SOLXY(__NR_openat,               sys_openat),                /*  68 */
+#if defined(VGP_x86_solaris)
+   PLAXY(__NR_openat64,             sys_openat64),              /*  69 */
+#endif /* VGP_x86_solaris */
+   SOLXY(__NR_tasksys,              sys_tasksys),               /*  70 */
+   SOLXY(__NR_getpagesizes,         sys_getpagesizes),          /*  73 */
+   SOLXY(__NR_lwp_park,             sys_lwp_park),              /*  77 */
+   SOLXY(__NR_sendfilev,            sys_sendfilev),             /*  78 */
+#if defined(SOLARIS_LWP_NAME_SYSCALL)
+   SOLXY(__NR_lwp_name,             sys_lwp_name),              /*  79 */
+#endif /* SOLARIS_LWP_NAME_SYSCALL */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   GENX_(__NR_rmdir,                sys_rmdir),                 /*  79 */
+   GENX_(__NR_mkdir,                sys_mkdir),                 /*  80 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+   GENXY(__NR_getdents,             sys_getdents),              /*  81 */
+   SOLXY(__NR_privsys,              sys_privsys),               /*  82 */
+   SOLXY(__NR_ucredsys,             sys_ucredsys),              /*  83 */
+   SOLXY(__NR_getmsg,               sys_getmsg),                /*  85 */
+   SOLX_(__NR_putmsg,               sys_putmsg),                /*  86 */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   SOLXY(__NR_lstat,                sys_lstat),                 /*  88 */
+   GENX_(__NR_symlink,              sys_symlink),               /*  89 */
+   GENX_(__NR_readlink,             sys_readlink),              /*  90 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+   GENX_(__NR_setgroups,            sys_setgroups),             /*  91 */
+   GENXY(__NR_getgroups,            sys_getgroups),             /*  92 */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   GENX_(__NR_fchmod,               sys_fchmod),                /*  93 */
+   GENX_(__NR_fchown,               sys_fchown),                /*  94 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+   SOLXY(__NR_sigprocmask,          sys_sigprocmask),           /*  95 */
+   GENXY(__NR_sigaltstack,          sys_sigaltstack),           /*  97 */
+   SOLXY(__NR_sigaction,            sys_sigaction),             /*  98 */
+   SOLXY(__NR_sigpending,           sys_sigpending),            /*  99 */
+   SOLX_(__NR_context,              sys_getsetcontext),         /* 100 */
+   SOLX_(__NR_fchmodat,             sys_fchmodat),              /* 101 */
+   SOLX_(__NR_mkdirat,              sys_mkdirat),               /* 102 */
+   SOLXY(__NR_statvfs,              sys_statvfs),               /* 103 */
+   SOLXY(__NR_fstatvfs,             sys_fstatvfs),              /* 104 */
+   SOLXY(__NR_nfssys,               sys_nfssys),                /* 106 */
+   SOLXY(__NR_waitid,               sys_waitid),                /* 107 */
+#if defined(SOLARIS_UTIMESYS_SYSCALL)
+   SOLX_(__NR_utimesys,             sys_utimesys),              /* 110 */
+#endif /* SOLARIS_UTIMESYS_SYSCALL */
+#if defined(SOLARIS_UTIMENSAT_SYSCALL)
+   SOLX_(__NR_utimensat,            sys_utimensat),             /* 110 */
+#endif /* SOLARIS_UTIMENSAT_SYSCALL */
+   SOLXY(__NR_sigresend,            sys_sigresend),             /* 111 */
+   SOLXY(__NR_priocntlsys,          sys_priocntlsys),           /* 112 */
+   SOLX_(__NR_pathconf,             sys_pathconf),              /* 113 */
+   SOLX_(__NR_mmap,                 sys_mmap),                  /* 115 */
+   GENXY(__NR_mprotect,             sys_mprotect),              /* 116 */
+   GENXY(__NR_munmap,               sys_munmap),                /* 117 */
+   GENXY(__NR_readv,                sys_readv),                 /* 121 */
+   GENX_(__NR_writev,               sys_writev),                /* 122 */
+#if defined(SOLARIS_UUIDSYS_SYSCALL)
+   SOLXY(__NR_uuidsys,              sys_uuidsys),               /* 124 */
+#endif /* SOLARIS_UUIDSYS_SYSCALL */
+   SOLX_(__NR_mmapobj,              sys_mmapobj),               /* 127 */
+   GENX_(__NR_setrlimit,            sys_setrlimit),             /* 128 */
+   GENXY(__NR_getrlimit,            sys_getrlimit),             /* 129 */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   GENX_(__NR_lchown,               sys_lchown),                /* 130 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+   SOLX_(__NR_memcntl,              sys_memcntl),               /* 131 */
+   SOLXY(__NR_getpmsg,              sys_getpmsg),               /* 132 */
+   SOLX_(__NR_putpmsg,              sys_putpmsg),               /* 133 */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   SOLX_(__NR_rename,               sys_rename),                /* 134 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+   SOLXY(__NR_uname,                sys_uname),                 /* 135 */
+   SOLX_(__NR_setegid,              sys_setegid),               /* 136 */
+   SOLX_(__NR_sysconfig,            sys_sysconfig),             /* 137 */
+   SOLXY(__NR_systeminfo,           sys_systeminfo),            /* 139 */
+   SOLX_(__NR_seteuid,              sys_seteuid),               /* 141 */
+   SOLX_(__NR_forksys,              sys_forksys),               /* 142 */
+   SOLXY(__NR_sigtimedwait,         sys_sigtimedwait),          /* 144 */
+   SOLX_(__NR_yield,                sys_yield),                 /* 146 */
+   SOLXY(__NR_lwp_sema_post,        sys_lwp_sema_post),         /* 148 */
+   SOLXY(__NR_lwp_sema_trywait,     sys_lwp_sema_trywait),      /* 149 */
+   SOLX_(__NR_lwp_detach,           sys_lwp_detach),            /* 150 */
+   SOLX_(__NR_fchroot,              sys_fchroot),               /* 153 */
+   SOLXY(__NR_gettimeofday,         sys_gettimeofday),          /* 156 */
+   GENXY(__NR_getitimer,            sys_getitimer),             /* 157 */
+   GENXY(__NR_setitimer,            sys_setitimer),             /* 158 */
+   SOLX_(__NR_lwp_create,           sys_lwp_create),            /* 159 */
+   SOLX_(__NR_lwp_exit,             sys_lwp_exit),              /* 160 */
+   SOLX_(__NR_lwp_suspend,          sys_lwp_suspend),           /* 161 */
+   SOLX_(__NR_lwp_continue,         sys_lwp_continue),          /* 162 */
+#if defined(SOLARIS_LWP_SIGQUEUE_SYSCALL)
+   SOLXY(__NR_lwp_sigqueue,         sys_lwp_sigqueue),          /* 163 */
+#else
+   SOLXY(__NR_lwp_kill,             sys_lwp_kill),              /* 163 */
+#endif /* SOLARIS_LWP_SIGQUEUE_SYSCALL */
+   SOLX_(__NR_lwp_self,             sys_lwp_self),              /* 164 */
+   SOLX_(__NR_lwp_sigmask,          sys_lwp_sigmask),           /* 165 */
+   SOLX_(__NR_lwp_private,          sys_lwp_private),           /* 166 */
+   SOLXY(__NR_lwp_wait,             sys_lwp_wait),              /* 167 */
+   SOLXY(__NR_lwp_mutex_wakeup,     sys_lwp_mutex_wakeup),      /* 168 */
+   SOLX_(__NR_lwp_cond_broadcast,   sys_lwp_cond_broadcast),    /* 172 */
+   SOLXY(__NR_pread,                sys_pread),                 /* 173 */
+   SOLX_(__NR_pwrite,               sys_pwrite),                /* 174 */
+#if defined(VGP_x86_solaris)
+   PLAX_(__NR_llseek,               sys_llseek32),              /* 175 */
+#endif /* VGP_x86_solaris */
+   SOLXY(__NR_rusagesys,            sys_rusagesys),             /* 181 */
+   SOLXY(__NR_port,                 sys_port),                  /* 182 */
+   SOLXY(__NR_pollsys,              sys_pollsys),               /* 183 */
+   SOLXY(__NR_labelsys,             sys_labelsys),              /* 184 */
+   SOLXY(__NR_acl,                  sys_acl),                   /* 185 */
+   SOLXY(__NR_auditsys,             sys_auditsys),              /* 186 */
+   SOLX_(__NR_p_online,             sys_p_online),              /* 189 */
+   SOLX_(__NR_sigqueue,             sys_sigqueue),              /* 190 */
+   SOLX_(__NR_clock_gettime,        sys_clock_gettime),         /* 191 */
+   SOLX_(__NR_clock_settime,        sys_clock_settime),         /* 192 */
+   SOLXY(__NR_clock_getres,         sys_clock_getres),          /* 193 */
+   SOLXY(__NR_timer_create,         sys_timer_create),          /* 194 */
+   SOLX_(__NR_timer_delete,         sys_timer_delete),          /* 195 */
+   SOLXY(__NR_timer_settime,        sys_timer_settime),         /* 196 */
+   SOLXY(__NR_timer_gettime,        sys_timer_gettime),         /* 197 */
+   SOLX_(__NR_timer_getoverrun,     sys_timer_getoverrun),      /* 198 */
+   GENXY(__NR_nanosleep,            sys_nanosleep),             /* 199 */
+   SOLXY(__NR_facl,                 sys_facl),                  /* 200 */
+   SOLXY(__NR_door,                 sys_door),                  /* 201 */
+   GENX_(__NR_setreuid,             sys_setreuid),              /* 202 */
+   GENX_(__NR_setregid,             sys_setregid),              /* 203 */
+   SOLXY(__NR_schedctl,             sys_schedctl),              /* 206 */
+   SOLXY(__NR_resolvepath,          sys_resolvepath),           /* 209 */
+   SOLXY(__NR_lwp_mutex_timedlock,  sys_lwp_mutex_timedlock),   /* 210 */
+   SOLXY(__NR_lwp_sema_timedwait,   sys_lwp_sema_timedwait),    /* 211 */
+   SOLXY(__NR_lwp_rwlock_sys,       sys_lwp_rwlock_sys),        /* 212 */
+#if defined(VGP_x86_solaris)
+   GENXY(__NR_getdents64,           sys_getdents64),            /* 213 */
+   PLAX_(__NR_mmap64,               sys_mmap64),                /* 214 */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   PLAXY(__NR_stat64,               sys_stat64),                /* 215 */
+   PLAXY(__NR_lstat64,              sys_lstat64),               /* 216 */
+   PLAXY(__NR_fstat64,              sys_fstat64),               /* 217 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+   PLAXY(__NR_statvfs64,            sys_statvfs64),             /* 218 */
+   PLAXY(__NR_fstatvfs64,           sys_fstatvfs64),            /* 219 */
+#endif /* VGP_x86_solaris */
+#if defined(VGP_x86_solaris)
+   PLAX_(__NR_setrlimit64,          sys_setrlimit64),           /* 220 */
+   PLAXY(__NR_getrlimit64,          sys_getrlimit64),           /* 221 */
+   PLAXY(__NR_pread64,              sys_pread64),               /* 222 */
+   PLAX_(__NR_pwrite64,             sys_pwrite64),              /* 223 */
+#if defined(SOLARIS_OLD_SYSCALLS)
+   PLAXY(__NR_open64,               sys_open64),                /* 225 */
+#endif /* SOLARIS_OLD_SYSCALLS */
+#endif /* VGP_x86_solaris */
+   SOLXY(__NR_zone,                 sys_zone),                  /* 227 */
+   SOLXY(__NR_getcwd,               sys_getcwd),                /* 229 */
+   SOLXY(__NR_so_socket,            sys_so_socket),             /* 230 */
+   SOLXY(__NR_so_socketpair,        sys_so_socketpair),         /* 231 */
+   SOLX_(__NR_bind,                 sys_bind),                  /* 232 */
+   SOLX_(__NR_listen,               sys_listen),                /* 233 */
+   SOLXY(__NR_accept,               sys_accept),                /* 234 */
+   SOLX_(__NR_connect,              sys_connect),               /* 235 */
+   SOLX_(__NR_shutdown,             sys_shutdown),              /* 236 */
+   SOLXY(__NR_recv,                 sys_recv),                  /* 237 */
+   SOLXY(__NR_recvfrom,             sys_recvfrom),              /* 238 */
+   SOLXY(__NR_recvmsg,              sys_recvmsg),               /* 239 */
+   SOLX_(__NR_send,                 sys_send),                  /* 240 */
+   SOLX_(__NR_sendmsg,              sys_sendmsg),               /* 241 */
+   SOLX_(__NR_sendto,               sys_sendto),                /* 242 */
+   SOLXY(__NR_getpeername,          sys_getpeername),           /* 243 */
+   SOLXY(__NR_getsockname,          sys_getsockname),           /* 244 */
+   SOLXY(__NR_getsockopt,           sys_getsockopt),            /* 245 */
+   SOLX_(__NR_setsockopt,           sys_setsockopt),            /* 246 */
+   SOLX_(__NR_lwp_mutex_register,   sys_lwp_mutex_register),    /* 252 */
+   SOLXY(__NR_uucopy,               sys_uucopy),                /* 254 */
+   SOLX_(__NR_umount2,              sys_umount2)                /* 255 */
+};
+
+static SyscallTableEntry fasttrap_table[] = {
+   SOLX_(__NR_gethrtime,            fast_gethrtime),            /*   3 */
+   SOLX_(__NR_gethrvtime,           fast_gethrvtime),           /*   4 */
+   SOLX_(__NR_gethrestime,          fast_gethrestime)           /*   5 */
+#if defined(SOLARIS_GETHRT_FASTTRAP)
+   ,
+   SOLXY(__NR_gethrt,               fast_gethrt)                /*   7 */
+#endif /* SOLARIS_GETHRT_FASTTRAP */
+#if defined(SOLARIS_GETZONEOFFSET_FASTTRAP)
+   ,
+   SOLXY(__NR_getzoneoffset,        fast_getzoneoffset)         /*   8 */
+#endif /* SOLARIS_GETZONEOFFSET_FASTTRAP */
+};
+
+SyscallTableEntry *ML_(get_solaris_syscall_entry)(UInt sysno)
+{
+   const UInt syscall_table_size
+      = sizeof(syscall_table) / sizeof(syscall_table[0]);
+   const UInt fasttrap_table_size
+      = sizeof(fasttrap_table) / sizeof(fasttrap_table[0]);
+
+   SyscallTableEntry *table;
+   Int size;
+
+   switch (VG_SOLARIS_SYSNO_CLASS(sysno)) {
+   case VG_SOLARIS_SYSCALL_CLASS_CLASSIC:
+      table = syscall_table;
+      size = syscall_table_size;
+      break;
+   case VG_SOLARIS_SYSCALL_CLASS_FASTTRAP:
+      table = fasttrap_table;
+      size = fasttrap_table_size;
+      break;
+   default:
+      vg_assert(0);
+      break;
+   }
+   sysno = VG_SOLARIS_SYSNO_INDEX(sysno);
+   if (sysno < size) {
+      SyscallTableEntry *sys = &table[sysno];
+      if (!sys->before)
+         return NULL; /* no entry */
+      return sys;
+   }
+
+   /* Can't find a wrapper. */
+   return NULL;
+}
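+
+/* Worked example (illustrative; assumes __NR_gethrtime encodes the
+   FASTTRAP class together with index 3, per the fasttrap_table comment
+   above): for such a sysno, VG_SOLARIS_SYSNO_CLASS() selects
+   fasttrap_table, VG_SOLARIS_SYSNO_INDEX() yields 3, and the PRE-only
+   entry installed by SOLX_(__NR_gethrtime, fast_gethrtime) is returned. */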
+
+#endif // defined(VGO_solaris)
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_syswrap/syswrap-x86-solaris.c b/coregrind/m_syswrap/syswrap-x86-solaris.c
new file mode 100644
index 0000000..81a082c
--- /dev/null
+++ b/coregrind/m_syswrap/syswrap-x86-solaris.c
@@ -0,0 +1,990 @@
+
+/*--------------------------------------------------------------------*/
+/*--- Platform-specific syscalls stuff.      syswrap-x86-solaris.c ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2011-2014 Petr Pavlu
+      setup@dagobah.cz
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+#if defined(VGP_x86_solaris)
+
+#include "libvex_guest_offsets.h"
+#include "pub_core_basics.h"
+#include "pub_core_vki.h"
+#include "pub_core_threadstate.h"
+#include "pub_core_aspacemgr.h"
+#include "pub_core_xarray.h"
+#include "pub_core_clientstate.h"
+#include "pub_core_debuglog.h"
+#include "pub_core_libcassert.h"
+#include "pub_core_libcbase.h"
+#include "pub_core_libcfile.h"
+#include "pub_core_libcprint.h"
+#include "pub_core_libcsignal.h"
+#include "pub_core_machine.h"           // VG_(get_SP)
+#include "pub_core_mallocfree.h"
+#include "pub_core_options.h"
+#include "pub_core_tooliface.h"
+#include "pub_core_signals.h"
+#include "pub_core_syscall.h"
+#include "pub_core_syswrap.h"
+
+#include "priv_types_n_macros.h"
+#include "priv_syswrap-generic.h"
+#include "priv_syswrap-solaris.h"
+
+/* Call f(arg1), but first switch stacks, using 'stack' as the new stack, and
+   use 'retaddr' as f's return-to address.  Also, clear all the integer
+   registers before entering f. */
+__attribute__((noreturn))
+void ML_(call_on_new_stack_0_1)(Addr stack,             /* 4(%esp) */
+                                Addr retaddr,           /* 8(%esp) */
+                                void (*f)(Word),        /* 12(%esp) */
+                                Word arg1);             /* 16(%esp) */
+__asm__ (
+".text\n"
+".globl vgModuleLocal_call_on_new_stack_0_1\n"
+"vgModuleLocal_call_on_new_stack_0_1:\n"
+"   movl  %esp, %esi\n"         /* remember old stack pointer */
+"   movl  4(%esi), %esp\n"      /* set stack */
+"   pushl $0\n"                 /* align stack */
+"   pushl $0\n"                 /* align stack */
+"   pushl $0\n"                 /* align stack */
+"   pushl 16(%esi)\n"           /* arg1 to stack */
+"   pushl 8(%esi)\n"            /* retaddr to stack */
+"   pushl 12(%esi)\n"           /* f to stack */
+"   movl  $0, %eax\n"           /* zero all GP regs */
+"   movl  $0, %ebx\n"
+"   movl  $0, %ecx\n"
+"   movl  $0, %edx\n"
+"   movl  $0, %esi\n"
+"   movl  $0, %edi\n"
+"   movl  $0, %ebp\n"
+"   ret\n"                      /* jump to f */
+"   ud2\n"                      /* should never get here */
+".previous\n"
+);
+
+/* This function is called to set up the context of a new Valgrind thread
+   (which will run the client code). */
+void ML_(setup_start_thread_context)(ThreadId tid, vki_ucontext_t *uc)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   UWord *stack = (UWord*)tst->os_state.valgrind_stack_init_SP;
+   UShort cs, ds, ss, es, fs, gs;
+
+   VG_(memset)(uc, 0, sizeof(*uc));
+   uc->uc_flags = VKI_UC_CPU | VKI_UC_SIGMASK;
+
+   /* Start the thread with everything blocked. */
+   VG_(sigfillset)(&uc->uc_sigmask);
+
+   /* Set up the stack; it should always be 16-byte aligned before doing a
+      function call, i.e. the first parameter is then also 16-byte aligned. */
+   vg_assert(VG_IS_16_ALIGNED(stack));
+   stack -= 1;
+   stack[0] = 0; /* bogus return value */
+   stack[1] = (UWord)tst; /* the parameter */
+
+   /* Set up the registers. */
+   uc->uc_mcontext.gregs[VKI_EIP] = (UWord)ML_(start_thread_NORETURN);
+   uc->uc_mcontext.gregs[VKI_UESP] = (UWord)stack;
+
+   /* Copy segment registers. */
+   __asm__ __volatile__(
+      "movw %%cs, %[cs]\n"
+      "movw %%ds, %[ds]\n"
+      "movw %%ss, %[ss]\n"
+      "movw %%es, %[es]\n"
+      "movw %%fs, %[fs]\n"
+      "movw %%gs, %[gs]\n"
+      : [cs] "=m" (cs), [ds] "=m" (ds), [ss] "=m" (ss), [es] "=m" (es),
+        [fs] "=m" (fs), [gs] "=m" (gs));
+   uc->uc_mcontext.gregs[VKI_CS] = cs;
+   uc->uc_mcontext.gregs[VKI_DS] = ds;
+   uc->uc_mcontext.gregs[VKI_SS] = ss;
+   uc->uc_mcontext.gregs[VKI_ES] = es;
+   uc->uc_mcontext.gregs[VKI_FS] = fs;
+   uc->uc_mcontext.gregs[VKI_GS] = gs;
+}
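+
+/* Resulting initial frame (illustration): the new thread starts at
+   ML_(start_thread_NORETURN) with UESP pointing at a bogus return address
+   and the ThreadState pointer directly above it as the single argument:
+
+      stack[1]:  tst   (the parameter)
+      stack[0]:  0     (bogus return value)
+*/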
+
+/* Architecture-specific part of VG_(save_context). */
+void ML_(save_machine_context)(ThreadId tid, vki_ucontext_t *uc,
+                               CorePart part)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   struct vki_fpchip_state *fs
+      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;
+   SizeT i;
+
+   /* CPU */
+   /* Common registers */
+   uc->uc_mcontext.gregs[VKI_EIP] = tst->arch.vex.guest_EIP;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EIP,
+            (Addr)&uc->uc_mcontext.gregs[VKI_EIP], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_EAX] = tst->arch.vex.guest_EAX;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EAX,
+            (Addr)&uc->uc_mcontext.gregs[VKI_EAX], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_EBX] = tst->arch.vex.guest_EBX;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EBX,
+            (Addr)&uc->uc_mcontext.gregs[VKI_EBX], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_ECX] = tst->arch.vex.guest_ECX;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ECX,
+            (Addr)&uc->uc_mcontext.gregs[VKI_ECX], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_EDX] = tst->arch.vex.guest_EDX;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EDX,
+            (Addr)&uc->uc_mcontext.gregs[VKI_EDX], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_EBP] = tst->arch.vex.guest_EBP;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EBP,
+            (Addr)&uc->uc_mcontext.gregs[VKI_EBP], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_ESI] = tst->arch.vex.guest_ESI;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ESI,
+            (Addr)&uc->uc_mcontext.gregs[VKI_ESI], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_EDI] = tst->arch.vex.guest_EDI;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_EDI,
+            (Addr)&uc->uc_mcontext.gregs[VKI_EDI], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_UESP] = tst->arch.vex.guest_ESP;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ESP,
+            (Addr)&uc->uc_mcontext.gregs[VKI_UESP], sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_ESP] = 0;
+   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_ESP],
+            sizeof(UWord));
+
+   /* ERR and TRAPNO */
+   uc->uc_mcontext.gregs[VKI_ERR] = 0;
+   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_ERR],
+            sizeof(UWord));
+   uc->uc_mcontext.gregs[VKI_TRAPNO] = 0;
+   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_TRAPNO],
+            sizeof(UWord));
+
+   /* Segment registers */
+   /* Note that segment registers are 16b in VEX, but 32b in mcontext.  Thus
+      we tell a tool that the lower 16 bits were copied and that the higher 16
+      bits were set (to zero).  (This assumes a little-endian
+      architecture.) */
+   uc->uc_mcontext.gregs[VKI_CS] = tst->arch.vex.guest_CS;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_CS,
+            (Addr)&uc->uc_mcontext.gregs[VKI_CS], sizeof(UShort));
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)(&uc->uc_mcontext.gregs[VKI_CS]) + 2, sizeof(UShort));
+   uc->uc_mcontext.gregs[VKI_DS] = tst->arch.vex.guest_DS;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_DS,
+            (Addr)&uc->uc_mcontext.gregs[VKI_DS], sizeof(UShort));
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)(&uc->uc_mcontext.gregs[VKI_DS]) + 2, sizeof(UShort));
+   uc->uc_mcontext.gregs[VKI_SS] = tst->arch.vex.guest_SS;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_SS,
+            (Addr)&uc->uc_mcontext.gregs[VKI_SS], sizeof(UShort));
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)(&uc->uc_mcontext.gregs[VKI_SS]) + 2, sizeof(UShort));
+   uc->uc_mcontext.gregs[VKI_ES] = tst->arch.vex.guest_ES;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_ES,
+            (Addr)&uc->uc_mcontext.gregs[VKI_ES], sizeof(UShort));
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)(&uc->uc_mcontext.gregs[VKI_ES]) + 2, sizeof(UShort));
+   uc->uc_mcontext.gregs[VKI_FS] = tst->arch.vex.guest_FS;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_FS,
+            (Addr)&uc->uc_mcontext.gregs[VKI_FS], sizeof(UShort));
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)(&uc->uc_mcontext.gregs[VKI_FS]) + 2, sizeof(UShort));
+   uc->uc_mcontext.gregs[VKI_GS] = tst->arch.vex.guest_GS;
+   VG_TRACK(copy_reg_to_mem, part, tid, OFFSET_x86_GS,
+            (Addr)&uc->uc_mcontext.gregs[VKI_GS], sizeof(UShort));
+   VG_TRACK(post_mem_write, part, tid,
+            (Addr)(&uc->uc_mcontext.gregs[VKI_GS]) + 2, sizeof(UShort));
+
+   /* Handle eflags (optimistically make all flags defined). */
+   uc->uc_mcontext.gregs[VKI_EFL] =
+      LibVEX_GuestX86_get_eflags(&tst->arch.vex);
+   VG_TRACK(post_mem_write, part, tid, (Addr)&uc->uc_mcontext.gregs[VKI_EFL],
+         sizeof(UWord));
+   /* The LibVEX_GuestX86_get_eflags() call calculates the eflags value from
+      the CC_OP, CC_DEP1, CC_DEP2, CC_NDEP, DFLAG, IDFLAG and ACFLAG guest
+      state values.  The *FLAG values represent one-bit information and are
+      saved into eflags without loss of precision.  However, when the CC_*
+      values are converted into eflags, precision is lost.  What we do here
+      is to save the unmodified CC_* values into unused ucontext members
+      (the 'long uc_filler[5]' and 'int fs->__pad[2]' arrays) so that
+      ML_(restore_machine_context)() can later restore the context without
+      loss of precision.  This imposes a requirement on client programs not
+      to use these two members.  Luckily this is never the case in
+      Solaris-gate programs and libraries. */
+   /* CC_OP and CC_NDEP are always defined, but we don't want to tell a tool
+      that we just defined uc_filler[0,1].  This helps if someone uses an
+      uninitialized ucontext and tries to read (use) uc_filler[0,1].  Memcheck
+      in such a case should detect this error. */
+   VKI_UC_GUEST_CC_OP(uc) = tst->arch.vex.guest_CC_OP;
+   VKI_UC_GUEST_CC_NDEP(uc) = tst->arch.vex.guest_CC_NDEP;
+   /* We want to copy shadow values of CC_DEP1 and CC_DEP2 so we have to tell
+      a tool about this copy. */
+   VKI_UC_GUEST_CC_DEP1(uc) = tst->arch.vex.guest_CC_DEP1;
+   VG_TRACK(copy_reg_to_mem, part, tid,
+            offsetof(VexGuestX86State, guest_CC_DEP1),
+            (Addr)&VKI_UC_GUEST_CC_DEP1(uc), sizeof(UWord));
+   VKI_UC_GUEST_CC_DEP2(uc) = tst->arch.vex.guest_CC_DEP2;
+   VG_TRACK(copy_reg_to_mem, part, tid,
+            offsetof(VexGuestX86State, guest_CC_DEP2),
+            (Addr)&VKI_UC_GUEST_CC_DEP2(uc), sizeof(UWord));
+   /* Make another copy of eflags. */
+   VKI_UC_GUEST_EFLAGS_NEG(uc) = ~uc->uc_mcontext.gregs[VKI_EFL];
+   /* Calculate a checksum. */
+   {
+      UInt buf[5];
+      UInt checksum;
+
+      buf[0] = VKI_UC_GUEST_CC_OP(uc);
+      buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
+      buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
+      buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
+      buf[4] = uc->uc_mcontext.gregs[VKI_EFL];
+      checksum = ML_(fletcher32)((UShort*)&buf, sizeof(buf) / sizeof(UShort));
+      /* Store the checksum. */
+      VKI_UC_GUEST_EFLAGS_CHECKSUM(uc) = checksum;
+   }
+
+   /* FPU */
+   /* x87 */
+   vg_assert(sizeof(fs->state) == 108);
+   LibVEX_GuestX86_get_x87(&tst->arch.vex, (UChar*)&fs->state);
+
+   /* Flags and control words */
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->state, 28);
+   /* ST registers */
+   for (i = 0; i < 8; i++) {
+      Addr addr = (Addr)&fs->state + 28 + i * 10;
+      /* x87 uses 80-bit FP registers but VEX uses only 64-bit registers,
+         so we have to lie here. :< */
+      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
+               guest_FPREG[i]), addr, sizeof(ULong));
+      VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
+               guest_FPREG[i]), addr + 8, sizeof(UShort));
+   }
+
+   /* Status word (sw) at exception */
+   fs->status = 0;
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->status, sizeof(fs->status));
+
+   /* SSE */
+   fs->mxcsr = LibVEX_GuestX86_get_mxcsr(&tst->arch.vex);
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
+
+   /* MXCSR at exception */
+   fs->xstatus = 0;
+   VG_TRACK(post_mem_write, part, tid, (Addr)&fs->xstatus,
+            sizeof(fs->xstatus));
+
+   /* XMM registers */
+#define COPY_OUT_XMM(dest, src) \
+   do {                         \
+      dest._l[0] = src[0];      \
+      dest._l[1] = src[1];      \
+      dest._l[2] = src[2];      \
+      dest._l[3] = src[3];      \
+   } while (0)
+   COPY_OUT_XMM(fs->xmm[0], tst->arch.vex.guest_XMM0);
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
+            guest_XMM0), (Addr)&fs->xmm[0], sizeof(U128));
+   COPY_OUT_XMM(fs->xmm[1], tst->arch.vex.guest_XMM1);
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
+            guest_XMM1), (Addr)&fs->xmm[1], sizeof(U128));
+   COPY_OUT_XMM(fs->xmm[2], tst->arch.vex.guest_XMM2);
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
+            guest_XMM2), (Addr)&fs->xmm[2], sizeof(U128));
+   COPY_OUT_XMM(fs->xmm[3], tst->arch.vex.guest_XMM3);
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
+            guest_XMM3), (Addr)&fs->xmm[3], sizeof(U128));
+   COPY_OUT_XMM(fs->xmm[4], tst->arch.vex.guest_XMM4);
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
+            guest_XMM4), (Addr)&fs->xmm[4], sizeof(U128));
+   COPY_OUT_XMM(fs->xmm[5], tst->arch.vex.guest_XMM5);
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
+            guest_XMM5), (Addr)&fs->xmm[5], sizeof(U128));
+   COPY_OUT_XMM(fs->xmm[6], tst->arch.vex.guest_XMM6);
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
+            guest_XMM6), (Addr)&fs->xmm[6], sizeof(U128));
+   COPY_OUT_XMM(fs->xmm[7], tst->arch.vex.guest_XMM7);
+   VG_TRACK(copy_reg_to_mem, part, tid, offsetof(VexGuestX86State,
+            guest_XMM7), (Addr)&fs->xmm[7], sizeof(U128));
+#undef COPY_OUT_XMM
+}
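
The save path above stashes the raw CC_* thunk values next to a negated copy
of eflags and a Fletcher-32 checksum over all five words, so that the restore
path can tell a context produced by VG_(save_context) apart from one built by
the client. The following standalone sketch illustrates that scheme; it is not
Valgrind code, the names (fletcher32_ref, saved_eflags_area, stash) are made
up, and the checksum shown is the textbook Fletcher-32, assumed to behave like
ML_(fletcher32).

   #include <stdint.h>
   #include <stddef.h>

   /* Textbook Fletcher-32 over 16-bit words. */
   static uint32_t fletcher32_ref(const uint16_t *data, size_t words)
   {
      uint32_t sum1 = 0xffff, sum2 = 0xffff;
      while (words) {
         size_t n = words > 359 ? 359 : words;
         words -= n;
         do {
            sum1 += *data++;
            sum2 += sum1;
         } while (--n);
         sum1 = (sum1 & 0xffff) + (sum1 >> 16);
         sum2 = (sum2 & 0xffff) + (sum2 >> 16);
      }
      sum1 = (sum1 & 0xffff) + (sum1 >> 16);
      sum2 = (sum2 & 0xffff) + (sum2 >> 16);
      return (sum2 << 16) | sum1;
   }

   /* Hypothetical stand-in for the unused ucontext members used as a stash. */
   typedef struct {
      uint32_t cc_op, cc_ndep, cc_dep1, cc_dep2;
      uint32_t eflags_neg, eflags_checksum;
   } saved_eflags_area;

   /* Save side: keep the raw CC_* values, a negated copy of eflags and a
      checksum over all five words. */
   static void stash(saved_eflags_area *a, uint32_t cc_op, uint32_t cc_ndep,
                     uint32_t cc_dep1, uint32_t cc_dep2, uint32_t eflags)
   {
      uint32_t buf[5] = { cc_op, cc_ndep, cc_dep1, cc_dep2, eflags };
      a->cc_op = cc_op;     a->cc_ndep = cc_ndep;
      a->cc_dep1 = cc_dep1; a->cc_dep2 = cc_dep2;
      a->eflags_neg = ~eflags;
      a->eflags_checksum =
         fletcher32_ref((const uint16_t *)buf, sizeof(buf) / sizeof(uint16_t));
   }

   /* Restore side: the stash is trusted only if the negated copy and the
      checksum both match. */
   static int stash_is_valid(const saved_eflags_area *a, uint32_t eflags)
   {
      uint32_t buf[5] = { a->cc_op, a->cc_ndep, a->cc_dep1, a->cc_dep2, eflags };
      if (eflags != ~a->eflags_neg)
         return 0;
      return fletcher32_ref((const uint16_t *)buf,
                            sizeof(buf) / sizeof(uint16_t))
             == a->eflags_checksum;
   }
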
+
+/* Architecture-specific part of VG_(restore_context). */
+void ML_(restore_machine_context)(ThreadId tid, vki_ucontext_t *uc,
+                                  CorePart part, Bool esp_is_thrptr)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   struct vki_fpchip_state *fs
+      = &uc->uc_mcontext.fpregs.fp_reg_set.fpchip_state;
+
+   /* CPU */
+   if (uc->uc_flags & VKI_UC_CPU) {
+      /* Common registers */
+      tst->arch.vex.guest_EIP = uc->uc_mcontext.gregs[VKI_EIP];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_EIP], OFFSET_x86_EIP,
+               sizeof(UWord));
+      tst->arch.vex.guest_EAX = uc->uc_mcontext.gregs[VKI_EAX];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_EAX], OFFSET_x86_EAX,
+               sizeof(UWord));
+      tst->arch.vex.guest_EBX = uc->uc_mcontext.gregs[VKI_EBX];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_EBX], OFFSET_x86_EBX,
+               sizeof(UWord));
+      tst->arch.vex.guest_ECX = uc->uc_mcontext.gregs[VKI_ECX];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_ECX], OFFSET_x86_ECX,
+               sizeof(UWord));
+      tst->arch.vex.guest_EDX = uc->uc_mcontext.gregs[VKI_EDX];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_EDX], OFFSET_x86_EDX,
+               sizeof(UWord));
+      tst->arch.vex.guest_EBP = uc->uc_mcontext.gregs[VKI_EBP];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_EBP], OFFSET_x86_EBP,
+               sizeof(UWord));
+      tst->arch.vex.guest_ESI = uc->uc_mcontext.gregs[VKI_ESI];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_ESI], OFFSET_x86_ESI,
+               sizeof(UWord));
+      tst->arch.vex.guest_EDI = uc->uc_mcontext.gregs[VKI_EDI];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_EDI], OFFSET_x86_EDI,
+               sizeof(UWord));
+      tst->arch.vex.guest_ESP = uc->uc_mcontext.gregs[VKI_UESP];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_UESP], OFFSET_x86_ESP,
+               sizeof(UWord));
+
+      if (esp_is_thrptr) {
+         /* The thrptr value is passed by libc to the kernel in the otherwise
+            unused ESP field.  This is used when a new thread is created. */
+         VG_TRACK(pre_mem_read, part, tid,
+                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_ESP])",
+                  (Addr)&uc->uc_mcontext.gregs[VKI_ESP], sizeof(UWord));
+         if (uc->uc_mcontext.gregs[VKI_ESP]) {
+            tst->os_state.thrptr = uc->uc_mcontext.gregs[VKI_ESP];
+            ML_(update_gdt_lwpgs)(tid);
+         }
+      }
+
+      /* Ignore ERR and TRAPNO. */
+
+      /* Segment registers */
+      tst->arch.vex.guest_CS = uc->uc_mcontext.gregs[VKI_CS];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_CS], OFFSET_x86_CS,
+               sizeof(UShort));
+      tst->arch.vex.guest_DS = uc->uc_mcontext.gregs[VKI_DS];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_DS], OFFSET_x86_DS,
+               sizeof(UShort));
+      tst->arch.vex.guest_SS = uc->uc_mcontext.gregs[VKI_SS];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_SS], OFFSET_x86_SS,
+               sizeof(UShort));
+      tst->arch.vex.guest_ES = uc->uc_mcontext.gregs[VKI_ES];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_ES], OFFSET_x86_ES,
+               sizeof(UShort));
+      tst->arch.vex.guest_FS = uc->uc_mcontext.gregs[VKI_FS];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_FS], OFFSET_x86_FS,
+               sizeof(UShort));
+      tst->arch.vex.guest_GS = uc->uc_mcontext.gregs[VKI_GS];
+      VG_TRACK(copy_mem_to_reg, part, tid,
+               (Addr)&uc->uc_mcontext.gregs[VKI_GS], OFFSET_x86_GS,
+               sizeof(UShort));
+
+      /* Eflags */
+      {
+         UInt eflags;
+         UInt orig_eflags;
+         UInt new_eflags;
+         Bool ok_restore = False;
+
+         VG_TRACK(pre_mem_read, part, tid,
+                  "restore_machine_context(uc->uc_mcontext.gregs[VKI_EFL])",
+                  (Addr)&uc->uc_mcontext.gregs[VKI_EFL], sizeof(UWord));
+         eflags = uc->uc_mcontext.gregs[VKI_EFL];
+         orig_eflags = LibVEX_GuestX86_get_eflags(&tst->arch.vex);
+         new_eflags = eflags;
+         /* The kernel does not allow the ID flag to be changed via the
+            setcontext call, so do the same here. */
+         if (orig_eflags & VKI_EFLAGS_ID_BIT)
+            new_eflags |= VKI_EFLAGS_ID_BIT;
+         else
+            new_eflags &= ~VKI_EFLAGS_ID_BIT;
+         LibVEX_GuestX86_put_eflags(new_eflags, &tst->arch.vex);
+         VG_TRACK(post_reg_write, part, tid,
+                  offsetof(VexGuestX86State, guest_CC_DEP1), sizeof(UWord));
+         VG_TRACK(post_reg_write, part, tid,
+                  offsetof(VexGuestX86State, guest_CC_DEP2), sizeof(UWord));
+
+         /* Check if this context was created by us in VG_(save_context). In
+            that case, try to restore the CC_OP, CC_DEP1, CC_DEP2 and CC_NDEP
+            values which we previously stashed into unused members of the
+            context. */
+         if (eflags != ~VKI_UC_GUEST_EFLAGS_NEG(uc)) {
+            VG_(debugLog)(1, "syswrap-solaris",
+                             "The eflags value was restored from an "
+                             "explicitly set value in thread %d.\n", tid);
+            ok_restore = True;
+         }
+         else {
+            UInt buf[5];
+            UInt checksum;
+
+            buf[0] = VKI_UC_GUEST_CC_OP(uc);
+            buf[1] = VKI_UC_GUEST_CC_NDEP(uc);
+            buf[2] = VKI_UC_GUEST_CC_DEP1(uc);
+            buf[3] = VKI_UC_GUEST_CC_DEP2(uc);
+            buf[4] = eflags;
+            checksum = ML_(fletcher32)((UShort*)&buf,
+                                       sizeof(buf) / sizeof(UShort));
+            if (checksum == VKI_UC_GUEST_EFLAGS_CHECKSUM(uc)) {
+               /* Check ok, the full restoration is possible. */
+               VG_(debugLog)(1, "syswrap-solaris",
+                                "The CC_* guest state values were fully "
+                                "restored in thread %d.\n", tid);
+               ok_restore = True;
+
+               tst->arch.vex.guest_CC_OP = VKI_UC_GUEST_CC_OP(uc);
+               tst->arch.vex.guest_CC_NDEP = VKI_UC_GUEST_CC_NDEP(uc);
+               tst->arch.vex.guest_CC_DEP1 = VKI_UC_GUEST_CC_DEP1(uc);
+               VG_TRACK(copy_mem_to_reg, part, tid,
+                        (Addr)&VKI_UC_GUEST_CC_DEP1(uc),
+                        offsetof(VexGuestX86State, guest_CC_DEP1),
+                        sizeof(UWord));
+               tst->arch.vex.guest_CC_DEP2 = VKI_UC_GUEST_CC_DEP2(uc);
+               VG_TRACK(copy_mem_to_reg, part, tid,
+                        (Addr)&VKI_UC_GUEST_CC_DEP2(uc),
+                        offsetof(VexGuestX86State, guest_CC_DEP2),
+                        sizeof(UWord));
+            }
+         }
+
+         if (!ok_restore)
+            VG_(debugLog)(1, "syswrap-solaris",
+                             "Cannot fully restore the CC_* guest state "
+                             "values, using approximate eflags in thread "
+                             "%d.\n", tid);
+      }
+   }
+
+   if (uc->uc_flags & VKI_UC_FPU) {
+      /* FPU */
+      VexEmNote note;
+      SizeT i;
+
+      /* x87 */
+      /* Flags and control words */
+      VG_TRACK(pre_mem_read, part, tid,
+               "restore_machine_context(uc->uc_mcontext.fpregs..x87_state)",
+               (Addr)&fs->state, 28);
+      /* ST registers */
+      for (i = 0; i < 8; i++) {
+         Addr addr = (Addr)&fs->state + 28 + i * 10;
+         VG_TRACK(copy_mem_to_reg, part, tid, addr,
+                  offsetof(VexGuestX86State, guest_FPREG[i]), sizeof(ULong));
+      }
+      note = LibVEX_GuestX86_put_x87((UChar*)&fs->state, &tst->arch.vex);
+      if (note != EmNote_NONE)
+         VG_(message)(Vg_UserMsg,
+                      "Error restoring x87 state in thread %d: %s.\n",
+                      tid, LibVEX_EmNote_string(note));
+
+      /* SSE */
+      VG_TRACK(pre_mem_read, part, tid,
+               "restore_machine_context(uc->uc_mcontext.fpregs..mxcsr)",
+               (Addr)&fs->mxcsr, sizeof(fs->mxcsr));
+      note = LibVEX_GuestX86_put_mxcsr(fs->mxcsr, &tst->arch.vex);
+      if (note != EmNote_NONE)
+         VG_(message)(Vg_UserMsg,
+                      "Error restoring mxcsr state in thread %d: %s.\n",
+                      tid, LibVEX_EmNote_string(note));
+      /* XMM registers */
+#define COPY_IN_XMM(src, dest) \
+      do {                     \
+         dest[0] = src._l[0];  \
+         dest[1] = src._l[1];  \
+         dest[2] = src._l[2];  \
+         dest[3] = src._l[3];  \
+      } while (0)
+      COPY_IN_XMM(fs->xmm[0], tst->arch.vex.guest_XMM0);
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[0],
+               offsetof(VexGuestX86State, guest_XMM0), sizeof(U128));
+      COPY_IN_XMM(fs->xmm[1], tst->arch.vex.guest_XMM1);
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[1],
+               offsetof(VexGuestX86State, guest_XMM1), sizeof(U128));
+      COPY_IN_XMM(fs->xmm[2], tst->arch.vex.guest_XMM2);
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[2],
+               offsetof(VexGuestX86State, guest_XMM2), sizeof(U128));
+      COPY_IN_XMM(fs->xmm[3], tst->arch.vex.guest_XMM3);
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[3],
+               offsetof(VexGuestX86State, guest_XMM3), sizeof(U128));
+      COPY_IN_XMM(fs->xmm[4], tst->arch.vex.guest_XMM4);
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[4],
+               offsetof(VexGuestX86State, guest_XMM4), sizeof(U128));
+      COPY_IN_XMM(fs->xmm[5], tst->arch.vex.guest_XMM5);
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[5],
+               offsetof(VexGuestX86State, guest_XMM5), sizeof(U128));
+      COPY_IN_XMM(fs->xmm[6], tst->arch.vex.guest_XMM6);
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[6],
+               offsetof(VexGuestX86State, guest_XMM6), sizeof(U128));
+      COPY_IN_XMM(fs->xmm[7], tst->arch.vex.guest_XMM7);
+      VG_TRACK(copy_mem_to_reg, part, tid, (Addr)&fs->xmm[7],
+               offsetof(VexGuestX86State, guest_XMM7), sizeof(U128));
+#undef COPY_IN_XMM
+   }
+}
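
When restoring eflags, the wrapper above refuses to flip the ID bit (bit 21 of
eflags), mirroring the kernel's setcontext behaviour. A minimal sketch of that
masking step, with a locally defined constant standing in for
VKI_EFLAGS_ID_BIT:

   #include <stdint.h>

   /* Bit 21 of eflags is the ID (CPUID-detection) flag. */
   #define EFLAGS_ID_BIT (1u << 21)

   /* Keep the ID flag exactly as the guest currently has it, regardless of
      what the restored context asks for, which is the same policy the kernel
      applies to setcontext(). */
   static uint32_t merge_id_flag(uint32_t requested_eflags,
                                 uint32_t current_eflags)
   {
      if (current_eflags & EFLAGS_ID_BIT)
         return requested_eflags | EFLAGS_ID_BIT;
      else
         return requested_eflags & ~EFLAGS_ID_BIT;
   }
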
+
+/* Allocate GDT for a given thread. */
+void ML_(setup_gdt)(VexGuestX86State *vex)
+{
+   Addr gdt = (Addr)VG_(calloc)("syswrap-solaris-x86.gdt",
+                                VEX_GUEST_X86_GDT_NENT,
+                                sizeof(VexGuestX86SegDescr));
+   vex->guest_GDT = gdt;
+}
+
+/* Deallocate GDT for a given thread. */
+void ML_(cleanup_gdt)(VexGuestX86State *vex)
+{
+   if (!vex->guest_GDT)
+      return;
+   VG_(free)((void*)vex->guest_GDT);
+   vex->guest_GDT = 0;
+}
+
+/* For a given thread, update the LWPGS descriptor in the thread's GDT
+   according to the thread pointer. */
+void ML_(update_gdt_lwpgs)(ThreadId tid)
+{
+   ThreadState *tst = VG_(get_ThreadState)(tid);
+   Addr base = tst->os_state.thrptr;
+   VexGuestX86SegDescr *gdt = (VexGuestX86SegDescr*)tst->arch.vex.guest_GDT;
+   VexGuestX86SegDescr desc;
+
+   vg_assert(gdt);
+
+   VG_(memset)(&desc, 0, sizeof(desc));
+   if (base) {
+      desc.LdtEnt.Bits.LimitLow = -1;
+      desc.LdtEnt.Bits.LimitHi = -1;
+      desc.LdtEnt.Bits.BaseLow = base & 0xffff;
+      desc.LdtEnt.Bits.BaseMid = (base >> 16) & 0xff;
+      desc.LdtEnt.Bits.BaseHi = (base >> 24) & 0xff;
+      desc.LdtEnt.Bits.Pres = 1;
+      desc.LdtEnt.Bits.Dpl = 3; /* SEL_UPL */
+      desc.LdtEnt.Bits.Type = 19; /* SDT_MEMRWA */
+      desc.LdtEnt.Bits.Granularity = 1; /* SDP_PAGES */
+      desc.LdtEnt.Bits.Default_Big = 1; /* SDP_OP32 */
+   }
+
+   gdt[VKI_GDT_LWPGS] = desc;
+
+   /* Write %gs. */
+   tst->arch.vex.guest_GS = VKI_LWPGS_SEL;
+   VG_TRACK(post_reg_write, Vg_CoreSysCall, tid, OFFSET_x86_GS,
+            sizeof(UShort));
+}
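
The LWPGS descriptor above packs the 32-bit thread-pointer base into the
BaseLow/BaseMid/BaseHi fields of an x86 segment descriptor. A small
self-contained sketch (hypothetical struct and helper names, not the real
VexGuestX86SegDescr) showing the split and that it round-trips:

   #include <assert.h>
   #include <stdint.h>

   typedef struct {
      uint16_t base_low;   /* base[15:0]  */
      uint8_t  base_mid;   /* base[23:16] */
      uint8_t  base_hi;    /* base[31:24] */
   } seg_base_fields;

   static seg_base_fields split_base(uint32_t base)
   {
      seg_base_fields f;
      f.base_low = base & 0xffff;
      f.base_mid = (base >> 16) & 0xff;
      f.base_hi  = (base >> 24) & 0xff;
      return f;
   }

   static uint32_t join_base(seg_base_fields f)
   {
      return (uint32_t)f.base_low
             | ((uint32_t)f.base_mid << 16)
             | ((uint32_t)f.base_hi << 24);
   }

   int main(void)
   {
      uint32_t thrptr = 0xfeedbeefu;                    /* example base */
      assert(join_base(split_base(thrptr)) == thrptr);  /* round-trips */
      return 0;
   }
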
+
+
+/* ---------------------------------------------------------------------
+   PRE/POST wrappers for x86/Solaris-specific syscalls
+   ------------------------------------------------------------------ */
+
+#define PRE(name)       DEFN_PRE_TEMPLATE(x86_solaris, name)
+#define POST(name)      DEFN_POST_TEMPLATE(x86_solaris, name)
+
+/* implementation */
+
+PRE(sys_fstatat64)
+{
+   /* int fstatat64(int fildes, const char *path, struct stat64 *buf,
+                    int flag); */
+   PRINT("sys_fstatat64 ( %ld, %#lx(%s), %#lx, %ld )", ARG1, ARG2,
+         (char*)ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "fstatat64", int, fildes, const char *, path,
+                 struct stat64 *, buf, int, flag);
+   if (ARG2)
+      PRE_MEM_RASCIIZ("fstatat64(path)", ARG2);
+   PRE_MEM_WRITE("fstatat64(buf)", ARG3, sizeof(struct vki_stat64));
+
+   /* Be strict. */
+   if (ARG1 != VKI_AT_FDCWD &&
+       !ML_(fd_allowed)(ARG1, "fstatat64", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_fstatat64)
+{
+   POST_MEM_WRITE(ARG3, sizeof(struct vki_stat64));
+}
+
+PRE(sys_openat64)
+{
+   /* int openat64(int fildes, const char *filename, int flags);
+      int openat64(int fildes, const char *filename, int flags, mode_t mode);
+    */
+   *flags |= SfMayBlock;
+
+   if (ARG3 & VKI_O_CREAT) {
+      /* 4-arg version */
+      PRINT("sys_openat64 ( %ld, %#lx(%s), %ld, %ld )", ARG1, ARG2,
+            (char*)ARG2, ARG3, ARG4);
+      PRE_REG_READ4(long, "openat64", int, fildes, const char *, filename,
+                    int, flags, vki_mode_t, mode);
+   }
+   else {
+      /* 3-arg version */
+      PRINT("sys_openat64 ( %ld, %#lx(%s), %ld )", ARG1, ARG2, (char*)ARG2,
+            ARG3);
+      PRE_REG_READ3(long, "openat64", int, fildes, const char *, filename,
+                    int, flags);
+   }
+
+   PRE_MEM_RASCIIZ("openat64(filename)", ARG2);
+
+   /* Be strict. */
+   if (ARG1 != VKI_AT_FDCWD && !ML_(fd_allowed)(ARG1, "openat64", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_openat64)
+{
+   if (!ML_(fd_allowed)(RES, "openat64", tid, True)) {
+      VG_(close)(RES);
+      SET_STATUS_Failure(VKI_EMFILE);
+   }
+   else if (VG_(clo_track_fds))
+      ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG2);
+}
+
+PRE(sys_llseek32)
+{
+   /* offset_t llseek(int fildes, offset_t offset, int whence); */
+   PRINT("sys_llseek32 ( %ld, %#lx, %#lx, %ld )", ARG1, ARG2, ARG3, ARG4);
+   PRE_REG_READ4(long, "llseek", int, fildes, vki_u32, offset_low,
+                 vki_u32, offset_high, int, whence);
+
+   /* Stay sane. */
+   if (!ML_(fd_allowed)(ARG1, "llseek", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_mmap64)
+{
+   /* void *mmap64(void *addr, size_t len, int prot, int flags,
+                   int fildes, uint32_t offlo, uint32_t offhi); */
+   /* Note that this wrapper assumes a little-endian architecture; offlo and
+      offhi would have to be swapped on a big-endian architecture. */
+#if !defined(VG_LITTLEENDIAN)
+#error "Unexpected endianness."
+#endif /* !VG_LITTLEENDIAN */
+
+   SysRes r;
+   ULong u;
+   Off64T offset;
+
+   /* Stay sane. */
+   vg_assert(VKI_PAGE_SIZE == 4096);
+   vg_assert(sizeof(u) == sizeof(offset));
+
+   PRINT("sys_mmap ( %#lx, %#lx, %#lx, %#lx, %ld, %#lx, %#lx )",
+         ARG1, ARG2, ARG3, ARG4, ARG5, ARG6, ARG7);
+   PRE_REG_READ7(long, "mmap", void *, start, vki_size_t, length,
+                 int, prot, int, flags, int, fd, uint32_t, offlo,
+                 uint32_t, offhi);
+
+   /* Together, offlo and offhi can represent a negative offset.  Make sure
+      it is passed correctly to the generic mmap wrapper. */
+   u = ((ULong)ARG7 << 32) + ARG6;
+   offset = *(Off64T*)&u;
+
+   r = ML_(generic_PRE_sys_mmap)(tid, ARG1, ARG2, ARG3, ARG4, ARG5, offset);
+   SET_STATUS_from_SysRes(r);
+}
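
mmap64 passes the file offset as two 32-bit halves; the wrapper above glues
them back together and reinterprets the result as a signed 64-bit offset so
that negative offsets reach the generic mmap handler intact. A minimal sketch
of that recombination (illustrative names only; ARG6 plays the role of offlo
and ARG7 of offhi):

   #include <stdint.h>
   #include <stdio.h>

   static int64_t make_off64(uint32_t offlo, uint32_t offhi)
   {
      uint64_t u = ((uint64_t)offhi << 32) | offlo;
      return (int64_t)u;   /* reinterpret the bit pattern as a signed offset */
   }

   int main(void)
   {
      /* offhi = 0xffffffff, offlo = 0xfffff000 encodes an offset of -4096. */
      printf("%lld\n", (long long)make_off64(0xfffff000u, 0xffffffffu));
      return 0;
   }
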
+
+PRE(sys_stat64)
+{
+   /* int stat64(const char *path, struct stat64 *buf); */
+   PRINT("sys_stat64 ( %#lx(%s), %#lx )", ARG1, (char*)ARG1, ARG2);
+   PRE_REG_READ2(long, "stat64", const char *, path, struct stat64 *, buf);
+
+   PRE_MEM_RASCIIZ("stat64(path)", ARG1);
+   PRE_MEM_WRITE("stat64(buf)", ARG2, sizeof(struct vki_stat64));
+}
+
+POST(sys_stat64)
+{
+   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
+}
+
+PRE(sys_lstat64)
+{
+   /* int lstat64(const char *path, struct stat64 *buf); */
+   PRINT("sys_lstat64 ( %#lx(%s), %#lx )", ARG1, (char*)ARG1, ARG2);
+   PRE_REG_READ2(long, "lstat64", const char *, path, struct stat64 *, buf);
+
+   PRE_MEM_RASCIIZ("lstat64(path)", ARG1);
+   PRE_MEM_WRITE("lstat64(buf)", ARG2, sizeof(struct vki_stat64));
+}
+
+POST(sys_lstat64)
+{
+   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
+}
+
+PRE(sys_fstat64)
+{
+   /* int fstat64(int fildes, struct stat64 *buf); */
+   PRINT("sys_fstat64 ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "fstat64", int, fildes, struct stat64 *, buf);
+   PRE_MEM_WRITE("fstat64(buf)", ARG2, sizeof(struct vki_stat64));
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "fstat64", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_fstat64)
+{
+   POST_MEM_WRITE(ARG2, sizeof(struct vki_stat64));
+}
+
+static void do_statvfs64_post(struct vki_statvfs64 *stats, ThreadId tid)
+{
+   POST_FIELD_WRITE(stats->f_bsize);
+   POST_FIELD_WRITE(stats->f_frsize);
+   POST_FIELD_WRITE(stats->f_blocks);
+   POST_FIELD_WRITE(stats->f_bfree);
+   POST_FIELD_WRITE(stats->f_bavail);
+   POST_FIELD_WRITE(stats->f_files);
+   POST_FIELD_WRITE(stats->f_ffree);
+   POST_FIELD_WRITE(stats->f_favail);
+   POST_FIELD_WRITE(stats->f_fsid);
+   POST_MEM_WRITE((Addr) stats->f_basetype, VG_(strlen)(stats->f_basetype) + 1);
+   POST_FIELD_WRITE(stats->f_flag);
+   POST_FIELD_WRITE(stats->f_namemax);
+   POST_MEM_WRITE((Addr) stats->f_fstr, VG_(strlen)(stats->f_fstr) + 1);
+}
+
+PRE(sys_statvfs64)
+{
+   /* int statvfs64(const char *path, struct statvfs64 *buf); */
+   *flags |= SfMayBlock;
+   PRINT("sys_statvfs64 ( %#lx(%s), %#lx )", ARG1, (HChar *) ARG1, ARG2);
+   PRE_REG_READ2(long, "statvfs64", const char *, path,
+                 struct vki_statvfs64 *, buf);
+   PRE_MEM_RASCIIZ("statvfs64(path)", ARG1);
+   PRE_MEM_WRITE("statvfs64(buf)", ARG2, sizeof(struct vki_statvfs64));
+}
+
+POST(sys_statvfs64)
+{
+   do_statvfs64_post((struct vki_statvfs64 *) ARG2, tid);
+}
+
+PRE(sys_fstatvfs64)
+{
+   /* int fstatvfs64(int fd, struct statvfs64 *buf); */
+   *flags |= SfMayBlock;
+   PRINT("sys_fstatvfs64 ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "fstatvfs64", int, fd, struct vki_statvfs64 *, buf);
+   PRE_MEM_WRITE("fstatvfs64(buf)", ARG2, sizeof(struct vki_statvfs64));
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "fstatvfs64", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_fstatvfs64)
+{
+   do_statvfs64_post((struct vki_statvfs64 *) ARG2, tid);
+}
+
+PRE(sys_setrlimit64)
+{
+   /* int setrlimit64(int resource, struct rlimit64 *rlim); */
+   struct vki_rlimit64 *limit = (struct vki_rlimit64 *)ARG2;
+   PRINT("sys_setrlimit64 ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "setrlimit64", int, resource, struct rlimit64 *, rlim);
+   PRE_MEM_READ("setrlimit64(rlim)", ARG2, sizeof(struct vki_rlimit64));
+
+   if (limit && limit->rlim_cur > limit->rlim_max)
+      SET_STATUS_Failure(VKI_EINVAL);
+   else if (ARG1 == VKI_RLIMIT_NOFILE) {
+      if (limit->rlim_cur > VG_(fd_hard_limit) ||
+          limit->rlim_max != VG_(fd_hard_limit)) {
+         SET_STATUS_Failure(VKI_EPERM);
+      }
+      else {
+         VG_(fd_soft_limit) = limit->rlim_cur;
+         SET_STATUS_Success(0);
+      }
+   }
+   else if (ARG1 == VKI_RLIMIT_DATA) {
+      if (limit->rlim_cur > VG_(client_rlimit_data).rlim_max ||
+          limit->rlim_max > VG_(client_rlimit_data).rlim_max) {
+         SET_STATUS_Failure(VKI_EPERM);
+      }
+      else {
+         VG_(client_rlimit_data).rlim_max = limit->rlim_max;
+         VG_(client_rlimit_data).rlim_cur = limit->rlim_cur;
+         SET_STATUS_Success(0);
+      }
+   }
+   else if (ARG1 == VKI_RLIMIT_STACK && tid == 1) {
+      if (limit->rlim_cur > VG_(client_rlimit_stack).rlim_max ||
+          limit->rlim_max > VG_(client_rlimit_stack).rlim_max) {
+         SET_STATUS_Failure(VKI_EPERM);
+      }
+      else {
+         /* Change the value of client_stack_szB to the rlim_cur value but
+            only if it is smaller than the size of the allocated stack for the
+            client. */
+         if (limit->rlim_cur <= VG_(clstk_max_size))
+            VG_(threads)[tid].client_stack_szB = limit->rlim_cur;
+
+         VG_(client_rlimit_stack).rlim_max = limit->rlim_max;
+         VG_(client_rlimit_stack).rlim_cur = limit->rlim_cur;
+         SET_STATUS_Success(0);
+      }
+   }
+}
+
+PRE(sys_getrlimit64)
+{
+   /* int getrlimit64(int resource, struct rlimit64 *rlim); */
+   PRINT("sys_getrlimit64 ( %ld, %#lx )", ARG1, ARG2);
+   PRE_REG_READ2(long, "getrlimit64",
+                 int, resource, struct rlimit64 *, rlim);
+   PRE_MEM_WRITE("getrlimit64(rlim)", ARG2, sizeof(struct vki_rlimit64));
+}
+
+POST(sys_getrlimit64)
+{
+   /* Based on common_post_getrlimit() from syswrap-generic.c. */
+   struct vki_rlimit64 *rlim = (struct vki_rlimit64*)ARG2;
+
+   POST_MEM_WRITE(ARG2, sizeof(struct vki_rlimit64));
+
+   switch (ARG1 /*resource*/) {
+   case VKI_RLIMIT_NOFILE:
+      rlim->rlim_cur = VG_(fd_soft_limit);
+      rlim->rlim_max = VG_(fd_hard_limit);
+      break;
+   case VKI_RLIMIT_DATA:
+      rlim->rlim_cur = VG_(client_rlimit_data).rlim_cur;
+      rlim->rlim_max = VG_(client_rlimit_data).rlim_max;
+      break;
+   case VKI_RLIMIT_STACK:
+      rlim->rlim_cur = VG_(client_rlimit_stack).rlim_cur;
+      rlim->rlim_max = VG_(client_rlimit_stack).rlim_max;
+      break;
+   }
+}
+
+PRE(sys_pread64)
+{
+   /* ssize32_t pread64(int fd, void *buf, size32_t count,
+                        uint32_t offset_1, uint32_t offset_2);
+    */
+   *flags |= SfMayBlock;
+   PRINT("sys_pread64 ( %ld, %#lx, %ld, %ld, %ld )",
+         ARG1, ARG2, ARG3, ARG4, ARG5);
+   PRE_REG_READ5(long, "pread64", int, fd, void *, buf, vki_size32_t, count,
+                 vki_uint32_t, offset_1, vki_uint32_t, offset_2);
+   PRE_MEM_WRITE("pread64(buf)", ARG2, ARG3);
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "pread64", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+POST(sys_pread64)
+{
+   POST_MEM_WRITE(ARG2, RES);
+}
+
+PRE(sys_pwrite64)
+{
+   /* ssize32_t pwrite64(int fd, void *buf, size32_t count,
+                         uint32_t offset_1, uint32_t offset_2);
+    */
+   *flags |= SfMayBlock;
+   PRINT("sys_pwrite64 ( %ld, %#lx, %ld, %ld, %ld )",
+         ARG1, ARG2, ARG3, ARG4, ARG5);
+   PRE_REG_READ5(long, "pwrite64", int, fd, void *, buf, vki_size32_t, count,
+                 vki_uint32_t, offset_1, vki_uint32_t, offset_2);
+   PRE_MEM_READ("pwrite64(buf)", ARG2, ARG3);
+
+   /* Be strict. */
+   if (!ML_(fd_allowed)(ARG1, "pwrite64", tid, False))
+      SET_STATUS_Failure(VKI_EBADF);
+}
+
+PRE(sys_open64)
+{
+   /* int open64(const char *filename, int flags);
+      int open64(const char *filename, int flags, mode_t mode); */
+   *flags |= SfMayBlock;
+
+   if (ARG2 & VKI_O_CREAT) {
+      /* 3-arg version */
+      PRINT("sys_open64 ( %#lx(%s), %ld, %ld )", ARG1, (char*)ARG1, ARG2,
+            ARG3);
+      PRE_REG_READ3(long, "open64", const char *, filename, int, flags,
+                    vki_mode_t, mode);
+   }
+   else {
+      /* 2-arg version */
+      PRINT("sys_open64 ( %#lx(%s), %ld )", ARG1, (char*)ARG1, ARG2);
+      PRE_REG_READ2(long, "open64", const char *, filename, int, flags);
+   }
+   PRE_MEM_RASCIIZ("open(filename)", ARG1);
+}
+
+POST(sys_open64)
+{
+   if (!ML_(fd_allowed)(RES, "open64", tid, True)) {
+      VG_(close)(RES);
+      SET_STATUS_Failure(VKI_EMFILE);
+   }
+   else if (VG_(clo_track_fds))
+      ML_(record_fd_open_with_given_name)(tid, RES, (HChar*)ARG1);
+}
+
+#undef PRE
+#undef POST
+
+#endif // defined(VGP_x86_solaris)
+
+/*--------------------------------------------------------------------*/
+/*--- end                                                          ---*/
+/*--------------------------------------------------------------------*/
diff --git a/coregrind/m_tooliface.c b/coregrind/m_tooliface.c
index 40d8325..2932849 100644
--- a/coregrind/m_tooliface.c
+++ b/coregrind/m_tooliface.c
@@ -441,6 +441,9 @@
 DEF0(track_pre_reg_read,          CorePart, ThreadId, const HChar*, PtrdiffT, SizeT)
 DEF0(track_post_reg_write,        CorePart, ThreadId,               PtrdiffT, SizeT)
 
+DEF0(track_copy_mem_to_reg,       CorePart, ThreadId, Addr, PtrdiffT, SizeT)
+DEF0(track_copy_reg_to_mem,       CorePart, ThreadId, PtrdiffT, Addr, SizeT)
+
 DEF0(track_post_reg_write_clientcall_return, ThreadId, PtrdiffT, SizeT, Addr)
 
 DEF0(track_start_client_code,     ThreadId, ULong)
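
The two DEF0 entries above introduce copy_mem_to_reg and copy_reg_to_mem tool
events with the argument orders shown. Assuming the usual VG_(track_*)
registration pattern of the tool interface, a tool would hook them roughly as
in the sketch below (handler names are made up and the registration calls are
assumed to follow the existing convention):

   #include "pub_tool_basics.h"
   #include "pub_tool_tooliface.h"

   /* Hypothetical handlers; argument order follows the DEF0 entries above. */
   static void my_copy_mem_to_reg(CorePart part, ThreadId tid, Addr src,
                                  PtrdiffT guest_state_offset, SizeT size)
   {
      /* e.g. propagate shadow state from memory [src, src+size) into the
         guest register slice at guest_state_offset. */
   }

   static void my_copy_reg_to_mem(CorePart part, ThreadId tid,
                                  PtrdiffT guest_state_offset, Addr dst,
                                  SizeT size)
   {
      /* e.g. propagate shadow state from the guest register slice out to
         memory [dst, dst+size). */
   }

   static void my_pre_clo_init(void)
   {
      VG_(track_copy_mem_to_reg)(my_copy_mem_to_reg);
      VG_(track_copy_reg_to_mem)(my_copy_reg_to_mem);
      /* ... other tool initialisation ... */
   }
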
diff --git a/coregrind/m_trampoline.S b/coregrind/m_trampoline.S
index 027f9f6..189479c 100644
--- a/coregrind/m_trampoline.S
+++ b/coregrind/m_trampoline.S
@@ -1329,6 +1329,7 @@
 #	undef UD2_1024
 #	undef UD2_PAGE
 
+/*---------------------- tilegx-linux ----------------------*/
 #else
 #if defined(VGP_tilegx_linux)
 
@@ -1399,6 +1400,186 @@
 #	undef UD2_4K
 #	undef UD2_16K
 #	undef UD2_PAGE
+
+/*---------------- x86-solaris ----------------*/
+#else
+#if defined(VGP_x86_solaris)
+
+.global VG_(trampoline_stuff_start)
+VG_(trampoline_stuff_start):
+
+/* int strcmp(const char *s1, const char *s2); */
+.global VG_(x86_solaris_REDIR_FOR_strcmp)
+.type   VG_(x86_solaris_REDIR_FOR_strcmp), @function
+VG_(x86_solaris_REDIR_FOR_strcmp):
+        pushl   %ebp                    /* establish a stack frame */
+        movl    %esp, %ebp
+        movl    8(%ebp), %edx           /* get s1 */
+        movl    12(%esp), %ecx          /* get s2 */
+        jmp     2f                      /* go compare the first characters */
+1:
+        incl    %edx                    /* skip to the next s1 character */
+        incl    %ecx                    /* skip to the next s2 character */
+2:
+        movzbl  (%edx), %eax            /* load a character from s1 */
+        testb   %al, %al                /* is it null? */
+        jz      3f                      /* yes, exit */
+        cmpb    (%ecx), %al             /* are the characters equal? */
+        je      1b                      /* yes, proceed with next characters */
+3:
+        movzbl  (%ecx), %edx            /* load a character from s2 */
+        subl    %edx, %eax              /* calculate the return value */
+        popl    %ebp                    /* destroy the stack frame */
+        ret                             /* return to the caller */
+.size VG_(x86_solaris_REDIR_FOR_strcmp), .-VG_(x86_solaris_REDIR_FOR_strcmp)
+
+/* size_t strlen(const char *s); */
+.global VG_(x86_solaris_REDIR_FOR_strlen)
+.type   VG_(x86_solaris_REDIR_FOR_strlen), @function
+VG_(x86_solaris_REDIR_FOR_strlen):
+        pushl   %ebp                    /* establish a stack frame */
+        movl    %esp, %ebp
+        movl    8(%ebp), %edx           /* get s */
+        movl    %edx, %eax              /* copy s */
+        jmp     2f                      /* go handle the first character */
+1:
+        incl    %eax                    /* skip to the next s character */
+2:
+        cmpb    $0, (%eax)              /* is the s character null? */
+        jne     1b                      /* no, go process the next character */
+        subl    %edx, %eax              /* calculate the return value */
+        popl    %ebp                    /* destroy the stack frame */
+        ret                             /* return to the caller */
+.size VG_(x86_solaris_REDIR_FOR_strlen), .-VG_(x86_solaris_REDIR_FOR_strlen)
+
+.global VG_(trampoline_stuff_end)
+VG_(trampoline_stuff_end):
+
+/*---------------- amd64-solaris ----------------*/
+#else
+#if defined(VGP_amd64_solaris)
+
+.global VG_(trampoline_stuff_start)
+VG_(trampoline_stuff_start):
+
+/* char *strcpy(char *restrict s1, const char *restrict s2); */
+.global VG_(amd64_solaris_REDIR_FOR_strcpy)
+.type   VG_(amd64_solaris_REDIR_FOR_strcpy), @function
+VG_(amd64_solaris_REDIR_FOR_strcpy):
+        pushq   %rbp                    /* establish a stack frame */
+        movq    %rsp, %rbp
+        movq    %rdi, %rdx              /* copy s1 */
+1:
+        movzbl  (%rsi), %eax            /* load one input character */
+        movb    %al, (%rdx)             /* copy it to the output/s1 */
+        incq    %rsi                    /* skip to the next input character */
+        incq    %rdx                    /* skip to the next output character */
+        testb   %al, %al                /* is the copied character null? */
+        jnz     1b                      /* no, copy the next character */
+        leave                           /* destroy the stack frame */
+        movq    %rdi, %rax              /* set s1 as the return value */
+        ret                             /* return to the caller */
+.size VG_(amd64_solaris_REDIR_FOR_strcpy), .-VG_(amd64_solaris_REDIR_FOR_strcpy)
+
+/* char *strncpy(char *restrict s1, const char *restrict s2, size_t n); */
+.global VG_(amd64_solaris_REDIR_FOR_strncpy)
+.type   VG_(amd64_solaris_REDIR_FOR_strncpy), @function
+VG_(amd64_solaris_REDIR_FOR_strncpy):
+        pushq   %rbp                    /* establish a stack frame */
+        movq    %rsp, %rbp
+        movq    %rdi, %rcx              /* copy s1 */
+1:
+        testq   %rdx, %rdx              /* is the remaining size zero? */
+        jz      3f                      /* yes, all done */
+        movzbl  (%rsi), %eax            /* load one input character */
+        movb    %al, (%rcx)             /* copy it to the output/s1 */
+        decq    %rdx                    /* decrement the remaining size */
+        incq    %rsi                    /* skip to the next input character */
+        incq    %rcx                    /* skip to the next output character */
+        testb   %al, %al                /* is the copied character null? */
+        jnz     1b                      /* no, copy the next character */
+2:
+        testq   %rdx, %rdx              /* is the remaining size zero? */
+        jz      3f                      /* yes, all done */
+        movb    $0, (%rcx)              /* write a null to the output/s1 */
+        decq    %rdx                    /* decrement the remaining size */
+        incq    %rcx                    /* skip to the next output character */
+        jmp     2b                      /* proceed with the next character */
+3:
+        leave                           /* destroy the stack frame */
+        movq    %rdi, %rax              /* set s1 as the return value */
+        ret                             /* return to the caller */
+.size VG_(amd64_solaris_REDIR_FOR_strncpy), .-VG_(amd64_solaris_REDIR_FOR_strncpy)
+
+/* int strcmp(const char *s1, const char *s2); */
+.global VG_(amd64_solaris_REDIR_FOR_strcmp)
+.type   VG_(amd64_solaris_REDIR_FOR_strcmp), @function
+VG_(amd64_solaris_REDIR_FOR_strcmp):
+        pushq   %rbp                    /* establish a stack frame */
+        movq    %rsp, %rbp
+        jmp     2f                      /* go compare the first characters */
+1:
+        incq    %rdi                    /* skip to the next s1 character */
+        incq    %rsi                    /* skip to the next s2 character */
+2:
+        movzbl  (%rdi), %eax            /* load a character from s1 */
+        testb   %al, %al                /* is it null? */
+        jz      3f                      /* yes, exit */
+        cmpb    (%rsi), %al             /* are the characters equal? */
+        je      1b                      /* yes, proceed with next characters */
+3:
+        movzbl  (%rsi), %edx            /* load a character from s2 */
+        subl    %edx, %eax              /* calculate the return value */
+        leave                           /* destroy the stack frame */
+        ret                             /* return to the caller */
+.size VG_(amd64_solaris_REDIR_FOR_strcmp), .-VG_(amd64_solaris_REDIR_FOR_strcmp)
+
+/* char *strcat(char *restrict s1, const char *restrict s2); */
+.global VG_(amd64_solaris_REDIR_FOR_strcat)
+.type   VG_(amd64_solaris_REDIR_FOR_strcat), @function
+VG_(amd64_solaris_REDIR_FOR_strcat):
+        pushq   %rbp                    /* establish a stack frame */
+        movq    %rsp, %rbp
+        movq    %rdi, %rdx              /* copy s1 */
+        jmp     2f                      /* go handle the first character */
+1:
+        incq    %rdx                    /* skip to the next s1 character */
+2:
+        cmpb    $0, (%rdx)              /* is the s1 character null? */
+        jne     1b                      /* no, go check the next character */
+3:
+        movzbl  (%rsi), %eax            /* load a character from s2 */
+        movb    %al, (%rdx)             /* copy the s2 character to s1 */
+        incq    %rdx                    /* skip to the next s1 character */
+        incq    %rsi                    /* skip to the next s2 character */
+        testb   %al, %al                /* was the character null? */
+        jnz     3b                      /* no, go copy the next character */
+        movq    %rdi, %rax              /* set s1 as the return value */
+        leave                           /* destroy the stack frame */
+        ret                             /* return to the caller */
+.size VG_(amd64_solaris_REDIR_FOR_strcat), .-VG_(amd64_solaris_REDIR_FOR_strcat)
+
+/* size_t strlen(const char *s); */
+.global VG_(amd64_solaris_REDIR_FOR_strlen)
+.type   VG_(amd64_solaris_REDIR_FOR_strlen), @function
+VG_(amd64_solaris_REDIR_FOR_strlen):
+        pushq   %rbp                    /* establish a stack frame */
+        movq    %rsp, %rbp
+        movq    %rdi, %rax              /* copy s */
+        jmp     2f                      /* go handle the first character */
+1:
+        incq    %rax                    /* skip to the next s character */
+2:
+        cmpb    $0, (%rax)              /* is the s character null? */
+        jne     1b                      /* no, go process the next character */
+        subq    %rdi, %rax              /* calculate the return value */
+        leave                           /* destroy the stack frame */
+        ret                             /* return to the caller */
+.size VG_(amd64_solaris_REDIR_FOR_strlen), .-VG_(amd64_solaris_REDIR_FOR_strlen)
+
+.global VG_(trampoline_stuff_end)
+VG_(trampoline_stuff_end):
+
 /*---------------- unknown ----------------*/
 #else
 #  error Unknown platform
@@ -1415,6 +1596,8 @@
 #endif
 #endif
 #endif
+#endif
+#endif
 
 #if defined(VGO_linux)
 /* Let the linker know we don't need an executable stack */
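
The Solaris redirect functions added to m_trampoline.S above are deliberately
simple byte-at-a-time loops, presumably so the trampoline page stays
self-contained with no compiler- or libc-generated dependencies. For clarity,
a plain-C equivalent of the strcmp redirect (a sketch for illustration, not a
drop-in replacement):

   static int redir_strcmp_ref(const char *s1, const char *s2)
   {
      while (*s1 != '\0' && *s1 == *s2) {
         s1++;                  /* skip to the next s1 character */
         s2++;                  /* skip to the next s2 character */
      }
      /* Compare the bytes where the strings first differ (or the nulls). */
      return (unsigned char)*s1 - (unsigned char)*s2;
   }
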
diff --git a/coregrind/m_translate.c b/coregrind/m_translate.c
index 86a3f4a..d277a24 100644
--- a/coregrind/m_translate.c
+++ b/coregrind/m_translate.c
@@ -1679,6 +1679,9 @@
    vex_abiinfo.guest_ppc_zap_RZ_at_bl         = const_True;
    vex_abiinfo.host_ppc_calls_use_fndescrs    = False;
 #  endif
+#  if defined(VGP_amd64_solaris)
+   vex_abiinfo.guest_amd64_assume_fs_is_const = True;
+#  endif
 
    /* Set up closure args. */
    closure.tid    = tid;
diff --git a/coregrind/m_ume/elf.c b/coregrind/m_ume/elf.c
index 0e4c1cd..0516701 100644
--- a/coregrind/m_ume/elf.c
+++ b/coregrind/m_ume/elf.c
@@ -28,7 +28,7 @@
    The GNU General Public License is contained in the file COPYING.
 */
 
-#if defined(VGO_linux)
+#if defined(VGO_linux) || defined(VGO_solaris)
 
 #include "pub_core_basics.h"
 #include "pub_core_vki.h"
@@ -47,10 +47,15 @@
 #include "priv_ume.h"
 
 /* --- !!! --- EXTERNAL HEADERS start --- !!! --- */
-#define _GNU_SOURCE
-#define _FILE_OFFSET_BITS 64
+#if defined(VGO_linux)
+#  define _GNU_SOURCE
+#  define _FILE_OFFSET_BITS 64
+#endif
 /* This is for ELF types etc, and also the AT_ constants. */
 #include <elf.h>
+#if defined(VGO_solaris)
+#  include <sys/fasttrap.h> // PT_SUNWDTRACE_SIZE
+#endif
 /* --- !!! --- EXTERNAL HEADERS end --- !!! --- */
 
 
@@ -307,6 +312,9 @@
    Int i;
    void *entry;
    ESZ(Addr) ebase = 0;
+#  if defined(VGO_solaris)
+   ESZ(Addr) thrptr_addr = 0;
+#  endif
 
 #  if defined(HAVE_PIE)
    ebase = info->exe_base;
@@ -363,6 +371,18 @@
          if (ph->p_vaddr+ph->p_memsz > maxaddr)
             maxaddr = ph->p_vaddr+ph->p_memsz;
          break;
+
+#     if defined(VGO_solaris)
+      case PT_SUNWDTRACE:
+         if (ph->p_memsz < PT_SUNWDTRACE_SIZE ||
+             (ph->p_flags & (PF_R | PF_W | PF_X)) != (PF_R | PF_W | PF_X)) {
+            VG_(printf)("valgrind: m_ume.c: too small SUNWDTRACE size\n");
+            return VKI_ENOEXEC;
+         }
+
+         info->init_thrptr = ph->p_vaddr + ebase;
+         break;
+#     endif
                         
       case PT_INTERP: {
          HChar *buf = VG_(malloc)("ume.LE.1", ph->p_filesz+1);
@@ -392,6 +412,21 @@
             ESZ(Phdr) *iph = &interp->p[j];
             ESZ(Addr) end;
 
+#           if defined(VGO_solaris)
+            if (iph->p_type == PT_SUNWDTRACE) {
+               if (iph->p_memsz < PT_SUNWDTRACE_SIZE ||
+                   (iph->p_flags & (PF_R | PF_W | PF_X))
+                      != (PF_R | PF_W | PF_X)) {
+                  VG_(printf)("valgrind: m_ume.c: too small SUNWDTRACE size\n");
+                  return VKI_ENOEXEC;
+               }
+
+               /* Store the thrptr value into a temporary because we do not
+                  know yet where the interpreter is mapped. */
+               thrptr_addr = iph->p_vaddr;
+            }
+#           endif
+
             if (iph->p_type != PT_LOAD || iph->p_memsz == 0)
                continue;
             
@@ -409,9 +444,15 @@
          }
          break;
 
+#     if defined(PT_GNU_STACK) || defined(PT_SUNWSTACK)
 #     if defined(PT_GNU_STACK)
       /* Android's elf.h doesn't appear to have PT_GNU_STACK. */
       case PT_GNU_STACK:
+#     endif
+#     if defined(PT_SUNWSTACK)
+      /* Solaris-specific program header. */
+      case PT_SUNWSTACK:
+#     endif
          if ((ph->p_flags & PF_X) == 0) info->stack_prot &= ~VKI_PROT_EXEC;
          if ((ph->p_flags & PF_W) == 0) info->stack_prot &= ~VKI_PROT_WRITE;
          if ((ph->p_flags & PF_R) == 0) info->stack_prot &= ~VKI_PROT_READ;
@@ -494,6 +535,10 @@
       entry = (void *)(advised - interp_addr + interp->e.e_entry);
 
       info->interp_offset = advised - interp_addr;
+#     if defined(VGO_solaris)
+      if (thrptr_addr)
+         info->init_thrptr = thrptr_addr + info->interp_offset;
+#     endif
 
       VG_(free)(interp->p);
       VG_(free)(interp);
@@ -526,7 +571,7 @@
    return 0;
 }
 
-#endif // defined(VGO_linux)
+#endif // defined(VGO_linux) || defined(VGO_solaris)
 
 /*--------------------------------------------------------------------*/
 /*--- end                                                          ---*/
diff --git a/coregrind/m_ume/main.c b/coregrind/m_ume/main.c
index 11c6cc6..40dd460 100644
--- a/coregrind/m_ume/main.c
+++ b/coregrind/m_ume/main.c
@@ -51,7 +51,7 @@
 } ExeHandler;
 
 static ExeHandler exe_handlers[] = {
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    { VG_(match_ELF),    VG_(load_ELF) },
 #  elif defined(VGO_darwin)
    { VG_(match_macho),  VG_(load_macho) },
diff --git a/coregrind/m_ume/priv_ume.h b/coregrind/m_ume/priv_ume.h
index 4f78190..30e3975 100644
--- a/coregrind/m_ume/priv_ume.h
+++ b/coregrind/m_ume/priv_ume.h
@@ -27,7 +27,7 @@
    The GNU General Public License is contained in the file COPYING.
 */
 
-#if defined(VGO_linux) || defined(VGO_darwin)
+#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 #ifndef __PRIV_UME_H
 #define __PRIV_UME_H
@@ -36,7 +36,7 @@
 
 extern Int VG_(do_exec_inner)(const HChar *exe, ExeInfo *info);
 
-#if defined(VGO_linux)
+#if defined(VGO_linux) || defined(VGO_solaris)
 extern Bool VG_(match_ELF) ( const void *hdr, SizeT len );
 extern Int  VG_(load_ELF)  ( Int fd, const HChar *name, ExeInfo *info );
 #elif defined(VGO_darwin)
@@ -52,7 +52,7 @@
 
 #endif // __PRIV_UME_H
 
-#endif // defined(VGO_linux) || defined(VGO_darwin)
+#endif // defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 
 /*--------------------------------------------------------------------*/
 /*--- end                                                          ---*/
diff --git a/coregrind/m_vki.c b/coregrind/m_vki.c
index 2e1626b..8a83b05 100644
--- a/coregrind/m_vki.c
+++ b/coregrind/m_vki.c
@@ -76,7 +76,7 @@
 
    /* --- Platform-specific checks on signal sets --- */
 
-#  if defined(VGO_linux)
+#  if defined(VGO_linux) || defined(VGO_solaris)
    /* nothing to check */
 #  elif defined(VGP_x86_darwin) || defined(VGP_amd64_darwin)
    vg_assert(_VKI_NSIG == NSIG);
@@ -128,6 +128,14 @@
       syscall-amd64-darwin.S */
    vg_assert(VKI_SIG_SETMASK == 3);
 
+#  elif defined(VGO_solaris)
+   /* the toK- and fromK- forms are identical */
+   vg_assert(sizeof(vki_sigaction_toK_t)
+             == sizeof(vki_sigaction_fromK_t));
+   /* VKI_SET_SIGMASK is hardwired into syscall-x86-solaris.S
+      and syscall-amd64-solaris.S */
+   vg_assert(VKI_SIG_SETMASK == 3);
+
 #  else
 #     error "Unknown OS" 
 #  endif
diff --git a/coregrind/m_vkiscnums.c b/coregrind/m_vkiscnums.c
index 2e36551..e805248 100644
--- a/coregrind/m_vkiscnums.c
+++ b/coregrind/m_vkiscnums.c
@@ -86,6 +86,24 @@
 }
 
 //---------------------------------------------------------------------------
+#elif defined(VGO_solaris)
+//---------------------------------------------------------------------------
+
+const HChar *VG_(sysnum_string)(Word sysnum)
+{
+   static HChar buf[8+20+1];   // large enough
+
+   const HChar* classname = NULL;
+   switch (VG_SOLARIS_SYSNO_CLASS(sysnum)) {
+      case VG_SOLARIS_SYSCALL_CLASS_CLASSIC: classname = ""; break;
+      case VG_SOLARIS_SYSCALL_CLASS_FASTTRAP: classname = "fast:"; break;
+      default: classname = "UNKNOWN:"; break;
+   }
+   VG_(sprintf)(buf, "%s%ld", classname, VG_SOLARIS_SYSNO_INDEX(sysnum));
+   return buf;
+}
+
+//---------------------------------------------------------------------------
 #else
 //---------------------------------------------------------------------------
 #  error Unknown OS
diff --git a/coregrind/pub_core_aspacemgr.h b/coregrind/pub_core_aspacemgr.h
index aefb43e..3f58d81 100644
--- a/coregrind/pub_core_aspacemgr.h
+++ b/coregrind/pub_core_aspacemgr.h
@@ -70,6 +70,9 @@
 // Querying current status
 
 
+/* Finds an anonymous segment containing 'a'. Returned pointer is read only. */
+extern NSegment const *VG_(am_find_anon_segment) ( Addr a );
+
 /* Find the next segment along from 'here', if it is a file/anon/resvn
    segment. */
 extern NSegment const* VG_(am_next_nsegment) ( const NSegment* here,
@@ -122,7 +125,9 @@
 /* Describes a request for VG_(am_get_advisory). */
 typedef
    struct {
-      enum { MFixed, MHint, MAny } rkind;
+      /* Note: if rkind == MAlign then start specifies alignment. This is
+         Solaris specific. */
+      enum { MFixed, MHint, MAny, MAlign } rkind;
       Addr start;
       Addr len;
    }
@@ -209,8 +214,14 @@
    segment array accordingly. */
 extern SysRes VG_(am_mmap_file_fixed_client)
    ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset );
+extern SysRes VG_(am_mmap_file_fixed_client_flags)
+   ( Addr start, SizeT length, UInt prot, UInt flags, Int fd, Off64T offset );
 extern SysRes VG_(am_mmap_named_file_fixed_client)
-   ( Addr start, SizeT length, UInt prot, Int fd, Off64T offset, const HChar *name );
+   ( Addr start, SizeT length, UInt prot, Int fd,
+     Off64T offset, const HChar *name );
+extern SysRes VG_(am_mmap_named_file_fixed_client_flags)
+   ( Addr start, SizeT length, UInt prot, UInt flags, Int fd,
+     Off64T offset, const HChar *name );
 
 /* Map anonymously at a fixed address for the client, and update
    the segment array accordingly. */
diff --git a/coregrind/pub_core_clientstate.h b/coregrind/pub_core_clientstate.h
index dc93d1f..7087fbe 100644
--- a/coregrind/pub_core_clientstate.h
+++ b/coregrind/pub_core_clientstate.h
@@ -46,9 +46,11 @@
 extern Addr  VG_(clstk_start_base); // *Initial* lowest byte address
 extern Addr  VG_(clstk_end);        // Highest byte address
 extern UWord VG_(clstk_id);      // client stack id
+extern SizeT VG_(clstk_max_size); // max size of the main thread's client stack
 
-/* linux only: where is the client auxv ? */
-/* This is setup as part of setup_client_stack in initimg-linux.c. */
+/* Linux and Solaris only: where is the client auxv? */
+/* This is set up as part of setup_client_stack in initimg-linux.c
+   or initimg-solaris.c, respectively. */
 extern UWord* VG_(client_auxv);
 
 extern Addr  VG_(brk_base);	 // start of brk
@@ -71,6 +73,11 @@
 /* Same as above, but for /proc/<pid>/auxv. */
 extern Int VG_(cl_auxv_fd);
 
+#if defined(VGO_solaris)
+/* Same as above, but for /proc/<pid>/psinfo. */
+extern Int VG_(cl_psinfo_fd);
+#endif /* VGO_solaris */
+
 // Client's original rlimit data and rlimit stack
 extern struct vki_rlimit VG_(client_rlimit_data);
 extern struct vki_rlimit VG_(client_rlimit_stack);
@@ -93,6 +100,8 @@
    VG_(get_StackTrace) in m_stacktrace.c for further info. */
 extern Addr VG_(client__dl_sysinfo_int80);
 
+/* Obtains the initial client stack pointer from the finalised image info. */
+extern Addr VG_(get_initial_client_SP)(void);
 
 /* glibc nptl pthread systems only, when no-nptl-pthread-stackcache
    was given in --sim-hints.
@@ -112,6 +121,12 @@
    way to disable the pthread stack cache. */
 extern SizeT* VG_(client__stack_cache_actsize__addr);
 
+#if defined(VGO_solaris)
+/* Address of variable vg_vfork_fildes in vgpreload_core.so.0
+   (vg_preloaded.c). */
+extern Int* VG_(vfork_fildes_addr);
+#endif
+
 #endif   // __PUB_CORE_CLIENTSTATE_H
 
 /*--------------------------------------------------------------------*/
diff --git a/coregrind/pub_core_debuginfo.h b/coregrind/pub_core_debuginfo.h
index bb57b33..5a99d15 100644
--- a/coregrind/pub_core_debuginfo.h
+++ b/coregrind/pub_core_debuginfo.h
@@ -62,7 +62,7 @@
    released by simply re-opening and closing the same file (even via
    different fd!).
 */
-#if defined(VGO_linux) || defined(VGO_darwin)
+#if defined(VGO_linux) || defined(VGO_darwin) || defined(VGO_solaris)
 extern ULong VG_(di_notify_mmap)( Addr a, Bool allow_SkFileV, Int use_fd );
 
 extern void VG_(di_notify_munmap)( Addr a, SizeT len );
diff --git a/coregrind/pub_core_initimg.h b/coregrind/pub_core_initimg.h
index 428b0c2..7a20c03 100644
--- a/coregrind/pub_core_initimg.h
+++ b/coregrind/pub_core_initimg.h
@@ -119,6 +119,30 @@
    Addr  initial_client_IP;
 };
 
+/* ------------------------- Solaris ------------------------- */
+
+#elif defined(VGO_solaris)
+
+struct _IICreateImageInfo {
+   /* ------ Mandatory fields ------ */
+   const HChar* toolname;
+   Addr    sp_at_startup;
+   Addr    clstack_end; /* highest stack addressable byte */
+   /* ------ Per-OS fields ------ */
+   HChar** argv;
+   HChar** envp;
+};
+
+struct _IIFinaliseImageInfo {
+   /* ------ Mandatory fields ------ */
+   SizeT clstack_max_size;
+   Addr  initial_client_SP;
+   /* ------ Per-OS fields ------ */
+   Addr  initial_client_IP;
+   Addr  initial_client_TOC;
+   UInt* client_auxv;
+   Addr  initial_client_TP; /* thread pointer */
+};
 
 #else
 #  error "Unknown OS"
diff --git a/coregrind/pub_core_libcassert.h b/coregrind/pub_core_libcassert.h
index 32cfc50..5a7931a 100644
--- a/coregrind/pub_core_libcassert.h
+++ b/coregrind/pub_core_libcassert.h
@@ -75,8 +75,9 @@
 
 /* Called when some unhandleable client behaviour is detected.
    Prints a msg and aborts. */
-extern void VG_(unimplemented) ( const HChar* msg )
-            __attribute__((__noreturn__));
+extern void VG_(unimplemented) ( const HChar* format, ... )
+            __attribute__((__noreturn__))
+            PRINTF_CHECK(1, 2);
 
 /* Show the state of various threads related information, such
    as the guest stacktrace for each thread.
diff --git a/coregrind/pub_core_libcproc.h b/coregrind/pub_core_libcproc.h
index a1f1e9a..8c16b67 100644
--- a/coregrind/pub_core_libcproc.h
+++ b/coregrind/pub_core_libcproc.h
@@ -71,8 +71,11 @@
 // Environment manipulations
 extern HChar **VG_(env_setenv)   ( HChar ***envp, const HChar* varname,
                                    const HChar *val );
-extern void    VG_(env_unsetenv) ( HChar **env, const HChar *varname );
-extern void    VG_(env_remove_valgrind_env_stuff) ( HChar** env ); 
+extern void    VG_(env_unsetenv) ( HChar **env, const HChar *varname,
+                                   void (*free_fn) ( void *) );
+extern void    VG_(env_remove_valgrind_env_stuff) ( HChar** env,
+                                                    Bool ro_strings,
+                                                    void (*free_fn) (void *) );
 extern HChar **VG_(env_clone)    ( HChar **env_clone );
 
 // misc
diff --git a/coregrind/pub_core_machine.h b/coregrind/pub_core_machine.h
index 7fb7afe..a20681f 100644
--- a/coregrind/pub_core_machine.h
+++ b/coregrind/pub_core_machine.h
@@ -41,12 +41,12 @@
 #include "pub_core_basics.h"      // UnwindStartRegs
 
 // XXX: this is *really* the wrong spot for these things
-#if defined(VGP_x86_linux)
+#if defined(VGP_x86_linux) || defined(VGP_x86_solaris)
 #  define VG_ELF_DATA2XXX     ELFDATA2LSB
 #  define VG_ELF_MACHINE      EM_386
 #  define VG_ELF_CLASS        ELFCLASS32
 #  undef  VG_PLAT_USES_PPCTOC
-#elif defined(VGP_amd64_linux)
+#elif defined(VGP_amd64_linux) || defined(VGP_amd64_solaris)
 #  define VG_ELF_DATA2XXX     ELFDATA2LSB
 #  define VG_ELF_MACHINE      EM_X86_64
 #  define VG_ELF_CLASS        ELFCLASS64
@@ -169,6 +169,7 @@
 // Offsets for the Vex state
 #define VG_O_STACK_PTR        (offsetof(VexGuestArchState, VG_STACK_PTR))
 #define VG_O_INSTR_PTR        (offsetof(VexGuestArchState, VG_INSTR_PTR))
+#define VG_O_FRAME_PTR        (offsetof(VexGuestArchState, VG_FRAME_PTR))
 #define VG_O_FPC_REG          (offsetof(VexGuestArchState, VG_FPC_REG))
 
 
diff --git a/coregrind/pub_core_mallocfree.h b/coregrind/pub_core_mallocfree.h
index eeb4aa1..e3f0820 100644
--- a/coregrind/pub_core_mallocfree.h
+++ b/coregrind/pub_core_mallocfree.h
@@ -63,9 +63,10 @@
 // This is both the minimum payload size of a malloc'd block, and its
 // minimum alignment.  Must be a power of 2 greater than 4, and should be
 // greater than 8.
-#if   defined(VGP_x86_linux)   || \
-      defined(VGP_arm_linux)   || \
-      defined(VGP_mips32_linux)
+#if   defined(VGP_x86_linux)    || \
+      defined(VGP_arm_linux)    || \
+      defined(VGP_mips32_linux) || \
+      defined(VGP_x86_solaris)
 #  define VG_MIN_MALLOC_SZB        8
 // Nb: We always use 16 bytes for Darwin, even on 32-bits, so it can be used
 // for any AltiVec- or SSE-related type.  This matches the Darwin libc.
@@ -80,7 +81,8 @@
       defined(VGP_x86_darwin)     || \
       defined(VGP_amd64_darwin)   || \
       defined(VGP_arm64_linux)    || \
-      defined(VGP_tilegx_linux)
+      defined(VGP_tilegx_linux)   || \
+      defined(VGP_amd64_solaris)
 #  define VG_MIN_MALLOC_SZB       16
 #else
 #  error Unknown platform
diff --git a/coregrind/pub_core_options.h b/coregrind/pub_core_options.h
index 53ccfc0..a1239f8 100644
--- a/coregrind/pub_core_options.h
+++ b/coregrind/pub_core_options.h
@@ -220,6 +220,7 @@
 typedef
    enum {
       SimHint_lax_ioctls,
+      SimHint_lax_doors,
       SimHint_fuse_compatible,
       SimHint_enable_outer,
       SimHint_no_inner_prefix,
diff --git a/coregrind/pub_core_sigframe.h b/coregrind/pub_core_sigframe.h
index b1f385c..74f0d1b 100644
--- a/coregrind/pub_core_sigframe.h
+++ b/coregrind/pub_core_sigframe.h
@@ -41,9 +41,15 @@
 // frame appropriately.
 //--------------------------------------------------------------------
 
+/* This is an arbitrary si_code that we only use internally for SIGSEGV.
+   It corresponds to the value SI_KERNEL on Linux, but that's not really
+   of any significance. */
+#define VKI_SEGV_MADE_UP_GPF 0x80
+
 /* Create a signal frame for thread 'tid'. */
 extern 
 void VG_(sigframe_create) ( ThreadId tid, 
+                            Bool on_altstack,
                             Addr sp_top_of_frame,
                             const vki_siginfo_t *siginfo,
                             const struct vki_ucontext *uc,
@@ -57,6 +63,11 @@
 extern 
 void VG_(sigframe_destroy)( ThreadId tid, Bool isRT );
 
+#if defined(VGO_solaris)
+extern
+void VG_(sigframe_return)(ThreadId tid, const vki_ucontext_t *uc);
+#endif
+
 #endif   // __PUB_CORE_SIGFRAME_H
 
 /*--------------------------------------------------------------------*/
diff --git a/coregrind/pub_core_syscall.h b/coregrind/pub_core_syscall.h
index e1563b4..b7ff146 100644
--- a/coregrind/pub_core_syscall.h
+++ b/coregrind/pub_core_syscall.h
@@ -87,6 +87,8 @@
 extern SysRes VG_(mk_SysRes_mips64_linux)( ULong v0, ULong v1,
                                            ULong a3 );
 extern SysRes VG_(mk_SysRes_tilegx_linux)( Long val );
+extern SysRes VG_(mk_SysRes_x86_solaris) ( Bool isErr, UInt val, UInt val2 );
+extern SysRes VG_(mk_SysRes_amd64_solaris) ( Bool isErr, ULong val, ULong val2 );
 extern SysRes VG_(mk_SysRes_Error)       ( UWord val );
 extern SysRes VG_(mk_SysRes_Success)     ( UWord val );
 
diff --git a/coregrind/pub_core_syswrap.h b/coregrind/pub_core_syswrap.h
index 49dbc1e..3c8047c 100644
--- a/coregrind/pub_core_syswrap.h
+++ b/coregrind/pub_core_syswrap.h
@@ -33,6 +33,7 @@
 
 #include "pub_core_basics.h"        // VG_ macro
 #include "pub_core_threadstate.h"   // ThreadArchState
+#include "pub_core_tooliface.h"     // CorePart
 
 //--------------------------------------------------------------------
 // PURPOSE: This module contains all the syscall junk:  mostly PRE/POST
@@ -58,9 +59,15 @@
                ThreadId tid,
                Addr     ip, 
                SysRes   sysret,
-               Bool     restart
+               Bool     restart,
+               struct vki_ucontext *uc
             );
 
+#if defined(VGO_solaris)
+// Determine whether the thread is in a blocking syscall.
+extern Bool VG_(is_ip_in_blocking_syscall)(ThreadId tid, Addr ip);
+#endif
+
 // Wait until all other threads are dead
 extern void VG_(reap_threads)(ThreadId self);
 
@@ -80,6 +87,14 @@
 extern void (* VG_(address_of_m_main_shutdown_actions_NORETURN) )
             (ThreadId,VgSchedReturnCode);
 
+#if defined(VGO_solaris)
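+/* Solaris-only: VG_(save_context) copies the guest state of thread 'tid'
+   into the client ucontext 'uc'; VG_(restore_context) does the reverse.
+   'part' identifies the core component on whose behalf client memory is
+   accessed (for tool tracking). VG_(syswrap_init) performs one-time
+   initialisation of the syscall wrapper machinery. */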
+extern void VG_(save_context)(ThreadId tid, vki_ucontext_t *uc,
+                              CorePart part);
+extern void VG_(restore_context)(ThreadId tid, vki_ucontext_t *uc,
+                                 CorePart part, Bool esp_is_thrptr);
+extern void VG_(syswrap_init)(void);
+#endif
+
 #endif   // __PUB_CORE_SYSWRAP_H
 
 /*--------------------------------------------------------------------*/
diff --git a/coregrind/pub_core_threadstate.h b/coregrind/pub_core_threadstate.h
index 28d8cb8..935ab3e 100644
--- a/coregrind/pub_core_threadstate.h
+++ b/coregrind/pub_core_threadstate.h
@@ -266,6 +266,53 @@
             char *path;
          } io_registry_entry_from_path;
       } mach_args;
+
+#     elif defined(VGO_solaris)
+#     if defined(VGP_x86_solaris)
+      /* A pointer to thread related data. The pointer is used to set up
+         a segment descriptor (GDT[VKI_GDT_LWPGS]) when the thread is about to
+         be run. A client program sets this value explicitly by calling the
+         lwp_private syscall or it can be passed as a part of ucontext_t when
+         a new thread is created (the lwp_create syscall). */
+      Addr thrptr;
+#     elif defined(VGP_amd64_solaris)
+      /* The GDT is not fully simulated on AMD64/Solaris. The %fs segment
+         register is assumed to always be zero and vex->guest_FS_CONST holds
+         the 64-bit offset associated with a %fs value of zero. */
+#     endif
+
+      /* Stack id (value (UWord)(-1) means that there is no stack). This
+         tracks a stack that is set in restore_stack(). */
+      UWord stk_id;
+
+      /* Simulation of the kernel's lwp->lwp_ustack. Set in the PRE wrapper
+         of the getsetcontext syscall, for SETUSTACK. Used in
+         VG_(save_context)(), VG_(restore_context)() and
+         VG_(sigframe_create)(). */
+      vki_stack_t *ustack;
+
+      /* Flag saying whether the current call is in the door_return()
+         variant of the door() syscall. */
+      Bool in_door_return;
+
+      /* Address of the door server procedure corresponding to the current
+         thread. Used to keep track of which door call the current thread
+         services. Valid only between subsequent door_return() invocations. */
+      Addr door_return_procedure;
+
+      /* Simulation of the kernel's lwp->lwp_oldcontext. Set in
+         VG_(restore_context)() and VG_(sigframe_create)(). Used in
+         VG_(save_context)(). */
+      vki_ucontext_t *oldcontext;
+
+      /* Address of sc_shared_t struct shared between kernel and libc.
+         Set in POST(sys_schedctl). Every thread gets its own address
+         but typically many are squeezed on a singled mapped page.
+         Cleaned in the child atfork handler. */
+      Addr schedctl_data;
+
+      /* True if this is a daemon thread. */
+      Bool daemon_thread;
 #     endif
 
    }
diff --git a/coregrind/pub_core_tooliface.h b/coregrind/pub_core_tooliface.h
index 6968a18..a2f1939 100644
--- a/coregrind/pub_core_tooliface.h
+++ b/coregrind/pub_core_tooliface.h
@@ -233,6 +233,9 @@
    void (*track_post_reg_write_clientcall_return)(ThreadId, PtrdiffT, SizeT,
                                                   Addr);
 
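+   /* Notify the tool that the core has copied data from client memory into
+      a guest register, or from a guest register into client memory (used,
+      for example, when a Solaris ucontext is saved or restored). */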
+   void (*track_copy_mem_to_reg)(CorePart, ThreadId, Addr, PtrdiffT, SizeT);
+   void (*track_copy_reg_to_mem)(CorePart, ThreadId, PtrdiffT, Addr, SizeT);
+
    void (*track_start_client_code)(ThreadId, ULong);
    void (*track_stop_client_code) (ThreadId, ULong);
 
diff --git a/coregrind/pub_core_trampoline.h b/coregrind/pub_core_trampoline.h
index 7630729..ff96137 100644
--- a/coregrind/pub_core_trampoline.h
+++ b/coregrind/pub_core_trampoline.h
@@ -161,6 +161,20 @@
 extern UInt  VG_(mips64_linux_REDIR_FOR_strlen)( void* );
 #endif
 
+#if defined(VGP_x86_solaris)
+extern SizeT VG_(x86_solaris_REDIR_FOR_strcmp)(const HChar *, const HChar *);
+extern SizeT VG_(x86_solaris_REDIR_FOR_strlen)(const HChar *);
+#endif
+
+#if defined(VGP_amd64_solaris)
+extern HChar *VG_(amd64_solaris_REDIR_FOR_strcpy)(HChar *, const HChar *);
+extern HChar *VG_(amd64_solaris_REDIR_FOR_strncpy)(HChar *, const HChar *,
+                                                  SizeT);
+extern Int VG_(amd64_solaris_REDIR_FOR_strcmp)(const HChar *, const HChar *);
+extern HChar *VG_(amd64_solaris_REDIR_FOR_strcat)(HChar *, const HChar *);
+extern SizeT VG_(amd64_solaris_REDIR_FOR_strlen)(const HChar *);
+#endif
+
 #endif   // __PUB_CORE_TRAMPOLINE_H
 
 /*--------------------------------------------------------------------*/
diff --git a/coregrind/pub_core_ume.h b/coregrind/pub_core_ume.h
index 937778f..d9f6491 100644
--- a/coregrind/pub_core_ume.h
+++ b/coregrind/pub_core_ume.h
@@ -64,6 +64,10 @@
       HChar* executable_path; // OUT: path passed to execve()
 #endif
 
+#if defined(VGO_solaris)
+      Addr  init_thrptr; // OUT: architecture-specific user per-thread location
+#endif
+
       Addr entry;        // OUT: entrypoint in main executable
       Addr init_ip;      // OUT: address of first instruction to execute
       Addr brkbase;      // OUT: base address of brk segment
diff --git a/coregrind/vg_preloaded.c b/coregrind/vg_preloaded.c
index fbb2e28..576e70f 100644
--- a/coregrind/vg_preloaded.c
+++ b/coregrind/vg_preloaded.c
@@ -192,8 +192,167 @@
     // but don't care if it's initialized
 }
 
-#else
+#elif defined(VGO_solaris)
 
+/* Declare the errno and environ symbols weakly in case the client is not
+   linked against libc. In such a case it also cannot reach the replacement
+   functions for set_error() and spawnveg(), which are the only places where
+   these two variables are needed, so this is ok. */
+__attribute__((weak)) extern int errno;
+__attribute__((weak)) extern char **environ;
+
+#include <assert.h>
+#include <errno.h>
+#include <spawn.h>
+#include <sys/syscall.h>
+#include <sys/signal.h>
+#include <unistd.h>
+
+/* Replace function block_all_signals() from libc. When the client program is
+   not running under Valgrind, this function blocks all signals by setting
+   the sc_sigblock flag in the schedctl control block. When run under
+   Valgrind, that would bypass Valgrind's syscall and signal machinery.
+   Valgrind's signal machinery needs to retain control over which signals are
+   blocked and which are not (see m_signals.c and m_scheduler/scheduler.c for
+   more information - typically synchronous signals should not be blocked).
+   Therefore this function replacement emulates the lwp_sigmask syscall.
+*/
+void VG_REPLACE_FUNCTION_ZU(VG_Z_LIBC_SONAME, block_all_signals)(/*ulwp_t*/ void *self);
+void VG_REPLACE_FUNCTION_ZU(VG_Z_LIBC_SONAME, block_all_signals)(/*ulwp_t*/ void *self)
+{
+   syscall(SYS_lwp_sigmask, SIG_SETMASK, ~0U, ~0U, ~0U, ~0U);
+}
+
+/* Replace functions get_error() and set_error() in libc. These functions are
+   internal to the library and are used to work with an error value returned
+   by posix_spawn() (when it is implemented using vfork()). A child calls
+   set_error() to set an error code and the parent then calls get_error() to
+   read it. Accessor functions are used so these trivial store+load operations
+   are not changed by the compiler in any way.
+
+   Since Valgrind translates vfork() to a normal fork(), calling set_error()
+   by the child would have no effect on the error value in the parent so
+   something must be done to fix this problem.
+
+   A pipe is created between a child and its parent in the forksys pre-wrapper
+   when a vfork() is encountered. The child's end of the pipe is closed when
+   the child exits or execs (because close-on-exec is set on the file
+   descriptor). Valgrind (the parent) waits for the child's end of the pipe to
+   be closed, which preserves the vfork() behaviour that the parent process is
+   suspended while the child is using its resources.
+
+   The pipe is then used to send any error code set by the child in
+   posix_spawn() to the parent. If there is an error, Valgrind returns it as
+   an error from the vfork() syscall. This means the syscall can return errors
+   that it would normally never return but this is not a problem in practice
+   because any error is directly propagated as a return code from
+   posix_spawn().
+
+   The address of vg_vfork_fildes is found by Valgrind when debug information
+   for vgpreload_core.so is being processed. The value of this variable is
+   set in the forksys pre-wrapper before the fork() call is made, and is set
+   back to -1 by the parent before returning from the wrapper.
+
+   Newer Solaris versions introduce the spawn syscall and posix_spawn() is
+   implemented using it. The redirect is not needed for these versions.
+*/
+int vg_vfork_fildes = -1;
+
+int VG_REPLACE_FUNCTION_ZU(VG_Z_LIBC_SONAME, get_error)(int *errp);
+int VG_REPLACE_FUNCTION_ZU(VG_Z_LIBC_SONAME, get_error)(int *errp)
+{
+   /* Always return 0 when the parent tries to call get_error(). Any error
+      from the child is returned directly as an error from the vfork()
+      syscall. The value pointed to by errp is initialized only by the child,
+      so not redirecting this function would mean that the parent gets an
+      uninitialized/garbage value when it calls it. */
+   return 0;
+}
+
+int VG_REPLACE_FUNCTION_ZU(VG_Z_LIBC_SONAME, set_error)(int *errp, int err);
+int VG_REPLACE_FUNCTION_ZU(VG_Z_LIBC_SONAME, set_error)(int *errp, int err)
+{
+   *errp = err;
+
+   /* Libc should always call set_error() only after doing a vfork() syscall
+      in posix_spawn(). The forksys pre-wrapper saves a descriptor of the
+      child's end of the pipe in vg_vfork_fildes so it is an error if it is
+      not a valid file descriptor at this point. */
+   assert(vg_vfork_fildes >= 0);
+   /* The current protocol between this function and the forksys pre-wrapper
+      allows only errors in the range [0, 255] (one-byte values) to be sent. */
+   assert(err >= 0 && err <= 0xff);
+
+   if (err != 0) {
+      unsigned char w = (unsigned char)(err & 0xff);
+      ssize_t res;
+      do {
+         res = write(vg_vfork_fildes, &w, 1);
+         assert(res == 1 || (errno == EINTR || errno == ERESTART));
+      } while (res != 1);
+   }
+
+   return err;
+}
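+
+/* For illustration only - this sketch is not part of Valgrind itself. The
+   parent side of the protocol described above (the forksys pre-wrapper
+   inside the Valgrind core) essentially has to read at most one byte from
+   its end of the pipe once the child has exited or exec'd, and treat that
+   byte as the error code reported via set_error(). The descriptor name
+   parent_fildes is hypothetical. */
+#if 0
+static int vg_read_vfork_child_error(int parent_fildes)
+{
+   unsigned char w;
+   ssize_t res;
+   do {
+      res = read(parent_fildes, &w, 1);
+   } while (res < 0 && errno == EINTR);
+   /* res == 0: the pipe was closed without any error being reported.
+      res == 1: the child stored an error code with set_error(). */
+   return (res == 1) ? w : 0;
+}
+#endif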
+
+/* Replace spawnveg() in libast.so.1. This function is used by ksh to spawn
+   new processes. The library has a build-time option to select between
+   several variants of this function based on the behaviour of vfork() and
+   posix_spawn() on the system for which the library is being compiled.
+   Unfortunately, Solaris and illumos use the real vfork() variant, which does
+   not work correctly with the vfork() -> fork() translation done by Valgrind
+   (see the forksys pre-wrapper for details). Therefore the function is
+   replaced here with an implementation that uses posix_spawn(). This
+   replacement can be removed when the configuration of libast on Solaris and
+   illumos is changed to use the posix_spawn() implementation.
+*/
+pid_t VG_REPLACE_FUNCTION_ZU(libastZdsoZd1, spawnveg)(const char *command,
+                                                      char **argv,
+                                                      char **envp,
+                                                      pid_t pgid);
+pid_t VG_REPLACE_FUNCTION_ZU(libastZdsoZd1, spawnveg)(const char *command,
+                                                      char **argv,
+                                                      char **envp,
+                                                      pid_t pgid)
+{
+   int err = 0;
+   pid_t pid;
+   posix_spawnattr_t attr;
+   int attr_init_done = 0;
+
+   err = posix_spawnattr_init(&attr);
+   if (err != 0)
+      goto out;
+   attr_init_done = 1;
+
+   if (pgid != 0) {
+      if (pgid <= 1)
+         pgid = 0;
+      err = posix_spawnattr_setpgroup(&attr, pgid);
+      if (err != 0)
+         goto out;
+      err = posix_spawnattr_setflags(&attr, POSIX_SPAWN_SETPGROUP);
+      if (err != 0)
+         goto out;
+   }
+
+   err = posix_spawn(&pid, command, NULL, &attr, argv, envp ? envp : environ);
+
+out:
+   if (attr_init_done)
+      posix_spawnattr_destroy(&attr);
+   if (err != 0) {
+      errno = err;
+      return -1;
+   }
+   return pid;
+}
+
+#else
 #  error Unknown OS
 #endif
 
diff --git a/coregrind/vgdb-invoker-solaris.c b/coregrind/vgdb-invoker-solaris.c
new file mode 100644
index 0000000..019441b
--- /dev/null
+++ b/coregrind/vgdb-invoker-solaris.c
@@ -0,0 +1,530 @@
+/*--------------------------------------------------------------------*/
+/*--- Implementation of vgdb invoker subsystem on Solaris             */
+/*                      via /proc filesystem and control messages. ---*/
+/*--------------------------------------------------------------------*/
+
+/*
+   This file is part of Valgrind, a dynamic binary instrumentation
+   framework.
+
+   Copyright (C) 2014-2015 Ivo Raisr <ivosh@ivosh.net>
+
+   This program is free software; you can redistribute it and/or
+   modify it under the terms of the GNU General Public License as
+   published by the Free Software Foundation; either version 2 of the
+   License, or (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful, but
+   WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
+   02111-1307, USA.
+
+   The GNU General Public License is contained in the file COPYING.
+*/
+
+/* This module implements the vgdb-invoker subsystem as per vgdb.h
+   on Solaris. It differs significantly from the ptrace-based
+   implementation found in vgdb-invoker-ptrace.c. However the goal
+   is the same - to handle the following scenario:
+
+   - A valgrind process (referred to also as an inferior process)
+     is remotely debugged with gdb.
+   - All threads of the inferior process are stuck in blocking
+     syscalls.
+   - Therefore no thread can process packets received from gdb.
+
+   When module vgdb.c detects this situation then it calls
+   function invoker_invoke_gdbserver() within the context of
+   invoke_gdbserver_in_valgrind_thread thread. The steps of
+   interaction between vgdb and m_gdbserver module are as follows:
+
+   1. Function invoker_invoke_gdbserver() attaches to the inferior
+      process and stops all threads.
+   2. It gets registers of the first thread and modifies them
+      and the stack so that a call to "invoke_gdbserver" function
+      is arranged along with a function parameter.
+   3. Then it creates an agent thread within the inferior process
+      with these modified registers and waits until the agent thread
+      exits.
+   4. Meanwhile, in the inferior process, the function
+      VG_(invoke_gdbserver)() is invoked within the context of the
+      agent thread; all other threads are still stopped.
+   5. The agent thread processes packets from gdb relayed by vgdb.
+   6. Eventually processing is finished and the agent thread exits
+      in function give_control_back_to_vgdb().
+   7. vgdb then detaches from the inferior process and thus resumes
+      all the stopped threads.
+ */
+
+#include "vgdb.h"
+
+#include <assert.h>
+#include <errno.h>
+#include <string.h>
+
+typedef Addr CORE_ADDR;
+
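+/* Layout of a control message written to a /proc control file: a command
+   code optionally followed by an argument - operation flags for PCSET and
+   PCRUN, or a register set for PCAGENT (see the /proc manual page). */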
+typedef struct {
+   long cmd;
+   union {
+       long flags;
+       prgregset_t regs;
+   } arg;
+} ctl_t;
+
+/* Process control file /proc/<pid>/ctl.
+   Once this file is closed, the PR_RLC flag takes effect and the
+   inferior process resumes automatically. */
+static int ctl_fd = -1;
+
+/* Copy LEN bytes of data from vgdb memory at MYADDR
+   to valgrind memory at MEMADDR.
+   On failure (cannot write the valgrind memory)
+   returns the value of errno. */
+static int write_memory(pid_t pid, CORE_ADDR memaddr,
+                        const void *myaddr, size_t len)
+{
+   char procname[PATH_MAX];
+   snprintf(procname, sizeof(procname), "/proc/%d/as", pid);
+
+   /* Open the process address-space file. */
+   int as_fd = open(procname, O_WRONLY, 0);
+   if (as_fd < 0) {
+      int error = errno;
+      ERROR(error, "Failed to open %s.\n", procname);
+      return error;
+   }
+
+   if (debuglevel >= 1) {
+      DEBUG(1, "Writing bytes '");
+      size_t i;
+      for (i = 0; i < len; i++)
+         PDEBUG(1, "%02x", ((const unsigned char *) myaddr)[i]);
+      PDEBUG(1, "' to address %#lx.\n", memaddr);
+   }
+
+   ssize_t written = pwrite(as_fd, myaddr, len, memaddr);
+   if ((written < 0) || (written != len)) {
+      int error = errno;
+      ERROR(error, "Failed to write to file %s, memory block of %zu"
+            " bytes at %#lx to %#lx.\n",
+            procname, len, (Addr) myaddr, memaddr);
+      close(as_fd);
+      return error;
+   }
+
+   DEBUG(1, "Written ok.\n");
+   close(as_fd);
+   return 0;
+}
+
+/* Attaches to a process identified by pid and stops all threads. */
+static Bool attach(pid_t pid)
+{
+   char procname[PATH_MAX];
+   snprintf(procname, sizeof(procname), "/proc/%d/ctl", pid);
+
+   DEBUG(1, "Attaching to pid %d.\n", pid);
+
+   /* Open the process control file. */
+   ctl_fd = open(procname, O_WRONLY, 0);
+   if (ctl_fd < 0) {
+      ERROR(errno, "Failed to open %s.\n", procname);
+      return False;
+   }
+
+   DEBUG(1, "Setting run-on-last-close-flag (PR_RLC) to pid %d.\n", pid);
+
+   /* Set run-on-last-close flag. */
+   ctl_t ctl;
+   ctl.cmd = PCSET;
+   ctl.arg.flags = PR_RLC;
+   size_t bytes = sizeof(ctl.cmd) + sizeof(ctl.arg.flags);
+   ssize_t written = write(ctl_fd, (void *) &ctl, bytes);
+   if ((written < 0) || (written != bytes)) {
+      ERROR(errno, "Failed to write to ctl_fd: PCSET + PR_RLC.\n");
+      return False;
+   }
+
+   DEBUG(1, "Stopping process %d.\n", pid);
+
+   /* Stop the whole process - all threads. */
+   ctl.cmd = PCSTOP;
+   bytes = sizeof(ctl.cmd);
+   written = write(ctl_fd, (void *) &ctl, bytes);
+   if ((written < 0) || (written != bytes)) {
+      ERROR(errno, "Failed to write to ctl_fd: PCSTOP.\n");
+      return False;
+   }
+
+   DEBUG(1, "Process %d stopped.\n", pid);
+
+   /* Now confirm that this is actually the case. */
+   snprintf(procname, sizeof(procname), "/proc/%d/status", pid);
+   int status_fd = open(procname, O_RDONLY, 0);
+   if (status_fd < 0) {
+      ERROR(errno, "Failed to open %s.\n", procname);
+      return False;
+   }
+
+   pstatus_t pstatus;
+   bytes = read(status_fd, &pstatus, sizeof(pstatus));
+   if ((bytes < 0) || (bytes != sizeof(pstatus))) {
+      ERROR(errno, "Failed to read from %s.\n", procname);
+      close(status_fd);
+      return False;
+   }
+
+   if (pstatus.pr_flags & PR_RLC) {
+      DEBUG(2, "Process %d has run-on-last-close flag set. Good.\n", pid);
+   } else {
+      ERROR(0, "Process %d does not have run-on-last-close flag set!\n", pid);
+      close(status_fd);
+      return False;
+   }
+
+   if (pstatus.pr_lwp.pr_flags & PR_STOPPED) {
+      DEBUG(3, "Process %d seems to be stopped. Good.\n", pid);
+   } else {
+      ERROR(0, "Process %d is not stopped!\n", pid);
+      close(status_fd);
+      return False;
+   }
+
+   close(status_fd);
+   return True;
+}
+
+static void detach(pid_t pid)
+{
+   if (ctl_fd != -1) {
+      close(ctl_fd);
+      ctl_fd = -1;
+   }
+
+   DEBUG(1, "Detached from pid %d.\n", pid);
+}
+
+/* Gets the registers of the first thread. */
+static Bool get_regs(pid_t pid, prgregset_t *regs)
+{
+   char procname[PATH_MAX];
+   snprintf(procname, sizeof(procname), "/proc/%d/lwp/1/lwpstatus", pid);
+
+   DEBUG(1, "Getting registers from the first thread of process %d.\n", pid);
+
+   /* Open the first thread's status file. */
+   int status_fd = open(procname, O_RDONLY, 0);
+   if (status_fd < 0) {
+      ERROR(errno, "Failed to open file %s.\n", procname);
+      return False;
+   }
+
+   lwpstatus_t status;
+   ssize_t bytes = read(status_fd, &status, sizeof(status));
+   if ((bytes < 0) || (bytes != sizeof(status))) {
+      ERROR(errno, "Failed to read from %s.\n", procname);
+      close(status_fd);
+      return False;
+   }
+
+   DEBUG(3, "Registers of thread %d from process %d: ", status.pr_lwpid, pid);
+   unsigned int i;
+   for (i = 0; i < _NGREG; i++) {
+      PDEBUG(3, "%u: %#lx, ", i, (unsigned long) status.pr_reg[i]);
+   }
+   PDEBUG(3, "\n");
+
+   memcpy(regs, &status.pr_reg, sizeof(prgregset_t));
+   close(status_fd);
+   return True;
+}
+
+/* Modifies the register set so that a new stack frame is created
+   for "invoke_gdbserver" function with an extra argument.
+   The argument is written to the stack of the first thread.
+ */
+static Bool setup_stack_frame(pid_t pid, prgregset_t *regs)
+{
+   DEBUG(1, "Setting up new stack frame of process %d.\n", pid);
+
+   /* A specific int value is passed to invoke_gdbserver(), to check that
+      everything goes according to plan. */
+   const int check = 0x8BADF00D; // ate bad food.
+
+   /* A bad return address will be pushed on the stack.
+      Function invoke_gdbserver() cannot return. If it ever returns,
+      a NULL address pushed on the stack should ensure this is
+      detected. */
+   const Addr bad_return = 0;
+
+#if defined(VGA_x86)
+   Addr sp = (*regs)[UESP];
+#elif defined(VGA_amd64)
+   Addr sp = (*regs)[REG_RSP];
+#else
+   I_die_here : (sp) architecture missing in vgdb-invoker-solaris.c
+#endif
+
+   if (shared32 != NULL) {
+      /* vgdb speaking with a 32bit executable. */
+#if   defined(VGA_x86) || defined(VGA_amd64)
+      const size_t regsize = 4;
+
+      /* Push check argument on the stack - according to C/ia32 ABI. */
+      sp = sp - regsize;
+      DEBUG(1, "Pushing check argument to process %d memory.\n", pid);
+      assert(regsize == sizeof(check));
+      int error = write_memory(pid, sp, &check, regsize);
+      if (error != 0) {
+         ERROR(error, "Failed to push check argument to process %d memory.\n",
+                      pid);
+         detach(pid);
+         return False;
+      }
+
+      sp = sp - regsize;
+      DEBUG(1, "Pushing bad_return return address to process %d memory.\n",
+               pid);
+      /* Note that even for a 64 bits vgdb, only 4 bytes
+         of NULL bad_return are written. */
+      error = write_memory(pid, sp, &bad_return, regsize);
+      if (error != 0) {
+         ERROR(error, "Failed to push bad_return return address to process %d "
+                      "memory.\n", pid);
+         detach(pid);
+         return False;
+      }
+
+#if   defined(VGA_x86)
+      /* Set EBP, ESP, EIP to invoke gdbserver.
+         vgdb is 32bits, speaking with a 32bits process. */
+      (*regs)[EBP] = sp; // bp set to sp
+      (*regs)[UESP] = sp;
+      (*regs)[EIP] = shared32->invoke_gdbserver;
+#elif defined(VGA_amd64)
+      /* Set RBP, RSP, RIP to invoke gdbserver.
+         vgdb is 64bits, speaking with a 32bits process. */
+      (*regs)[REG_RBP] = sp; // bp set to sp
+      (*regs)[REG_RSP] = sp;
+      (*regs)[REG_RIP] = shared32->invoke_gdbserver;
+#else
+      I_die_here : not x86 or amd64 in x86/amd64 section
+#endif
+
+#else
+      I_die_here : architecture missing in vgdb-invoker-solaris.c
+#endif
+
+   } else if (shared64 != NULL) {
+#if defined(VGA_x86)
+      assert(0); /* 64bits process with a 32bits vgdb - no way */
+#elif defined(VGA_amd64)
+      /* 64bits vgdb speaking with a 64 bit process. */
+      const int regsize = 8;
+
+      /* Pass the check argument in rdi - according to the C/amd64 ABI. */
+      (*regs)[REG_RDI] = check;
+
+      /* Push the dummy bad_return return address on the stack. */
+      sp = sp - regsize;
+      DEBUG(1, "Pushing bad_return return address to process %d memory.\n",
+               pid);
+      int error = write_memory(pid, sp, &bad_return,
+                               sizeof(bad_return));
+      if (error != 0) {
+         ERROR(error, "Failed to push bad_return return address to process %d "
+                      "memory.\n", pid);
+         detach(pid);
+         return False;
+      }
+
+      /* set RBP, RSP, RIP to invoke gdbserver */
+      (*regs)[REG_RBP] = sp; // bp set to sp
+      (*regs)[REG_RSP] = sp;
+      (*regs)[REG_RIP] = shared64->invoke_gdbserver;
+#else
+      I_die_here: architecture missing in vgdb-invoker-solaris.c
+#endif
+   } else {
+      assert(0);
+   }
+
+   DEBUG(1, "New stack frame set up for process %d.\n", pid);
+   return True;
+}
+
+/* Creates and starts an agent thread within the inferior process.
+   The agent thread is created stopped and with its held signal set
+   (the signal mask) having all signals except SIGKILL and SIGSTOP
+   blocked. All these signals need to remain blocked while the agent
+   thread is running because valgrind syscall/signal machinery expects
+   that (remember: all valgrind threads are blocked in VgTs_WaitSys
+   - that is the reason why we are invoking the agent, after all).
+   It is necessary to resume the agent thread afterwards.
+ */
+static Bool invoke_agent(pid_t pid, prgregset_t *regs, id_t *agent_lwpid)
+{
+   assert(ctl_fd != -1);
+
+   DEBUG(1, "Creating an agent thread within process %d.\n", pid);
+
+   /* Create the agent thread. */
+   ctl_t ctl;
+   ctl.cmd = PCAGENT;
+   memcpy(&ctl.arg.regs, regs, sizeof(prgregset_t));
+   size_t bytes = sizeof(ctl.cmd) + sizeof(ctl.arg.regs);
+   ssize_t written = write(ctl_fd, (void *) &ctl, bytes);
+   if ((written < 0) || (written != bytes)) {
+      ERROR(errno, "Failed to write to ctl_fd: PCAGENT.\n");
+      return False;
+   }
+
+   DEBUG(1, "Obtaining agent thread lwpid for process %d.\n", pid);
+
+   char procname[PATH_MAX];
+   snprintf(procname, sizeof(procname),
+            "/proc/%d/lwp/agent/lwpstatus", pid);
+
+   int status_fd = open(procname, O_RDONLY, 0);
+   if (status_fd < 0) {
+      /* Operation failed but there is no way to get rid of the agent
+         thread from outside. We are doomed... */
+      ERROR(errno, "Failed to open file %s.\n", procname);
+      return False;
+   }
+
+   lwpstatus_t status;
+   bytes = read(status_fd, &status, sizeof(status));
+   if ((bytes < 0) || (bytes != sizeof(status))) {
+      ERROR(errno, "Failed to read from %s.\n", procname);
+      close(status_fd);
+      return False;
+   }
+
+   close(status_fd);
+   *agent_lwpid = status.pr_lwpid;
+
+   snprintf(procname, sizeof(procname),
+            "/proc/%d/lwp/agent/lwpctl", pid);
+
+   int agent_ctl_fd = open(procname, O_WRONLY, 0);
+   if (agent_ctl_fd < 0) {
+      /* Resuming failed but there is no way to get rid of the agent
+         thread from outside. We are doomed... */
+      ERROR(errno, "Failed to open file %s.\n", procname);
+      return False;
+   }
+
+   DEBUG(1, "Resuming the agent thread for process %d.\n", pid);
+
+   /* Resume the agent thread. */
+   ctl.cmd = PCRUN;
+   ctl.arg.flags = 0;
+   bytes = sizeof(ctl.cmd) + sizeof(ctl.arg.flags);
+   written = write(agent_ctl_fd, (void *) &ctl, bytes);
+   if ((written < 0) || (written != bytes)) {
+      /* Resuming failed but there is no way to get rid of the agent
+         thread from outside. We are doomed... */
+      ERROR(errno, "Failed to write to agent_ctl_fd: PCRUN 0.\n");
+      close(agent_ctl_fd);
+      return False;
+   }
+
+   DEBUG(1, "Agent thread lwpid %d now running within process %d.\n",
+         *agent_lwpid, pid);
+   close(agent_ctl_fd);
+   return True;
+}
+
+/* Waits until the agent thread running inside the inferior
+   process exits. */
+static Bool wait_for_agent_exit(pid_t pid, id_t agent_lwpid)
+{
+   char procname[PATH_MAX];
+   snprintf(procname, sizeof(procname), "/proc/%d/lwp/agent/lwpctl", pid);
+
+   int agent_ctl_fd = open(procname, O_WRONLY, 0);
+   if (agent_ctl_fd < 0) {
+      if (errno == ENOENT) {
+         DEBUG(1, "Agent control file %s no longer exists. This means "
+               "agent thread %d exited meanwhile.\n",
+               procname, agent_lwpid);
+         return True;
+      }
+      ERROR(errno, "Failed to open agent control file %s.\n", procname);
+      return False;
+   }
+
+   DEBUG(1, "Waiting for agent thread %d to exit.\n", agent_lwpid);
+
+   /* Wait until the agent thread stops. This also covers the case
+      where the thread has already exited. */
+   ctl_t ctl;
+   ctl.cmd = PCWSTOP;
+   size_t bytes = sizeof(ctl.cmd);
+   ssize_t written = write(agent_ctl_fd, (void *) &ctl, bytes);
+   if ((written < 0) || (written != bytes)) {
+      if (errno == ENOENT) {
+         DEBUG(1, "Agent thread lwpid %d has now exited in process %d.\n",
+                  agent_lwpid, pid);
+      } else {
+         ERROR(errno, "Failed to write to agent_ctl_fd: PCWSTOP.\n");
+         close(agent_ctl_fd);
+         return False;
+      }
+   }
+
+   close(agent_ctl_fd);
+   return True;
+}
+
+Bool invoker_invoke_gdbserver(pid_t pid)
+{
+   if (attach(pid) != True) {
+      return False;
+   }
+
+   prgregset_t regs;
+   if (get_regs(pid, &regs) != True) {
+      detach(pid);
+      return False;
+   }
+
+   if (setup_stack_frame(pid, &regs) != True) {
+      detach(pid);
+      return False;
+   }
+
+   id_t agent_lwpid;
+   if (invoke_agent(pid, &regs, &agent_lwpid) != True) {
+      detach(pid);
+      return False;
+   }
+
+   if (wait_for_agent_exit(pid, agent_lwpid) != True) {
+      detach(pid);
+      return False;
+   }
+
+   detach(pid);
+   return True;
+}
+
+void invoker_cleanup_restore_and_detach(void *v_pid)
+{
+   detach(*(int *) v_pid);
+}
+
+void invoker_restrictions_msg(void)
+{
+}
+
+void invoker_valgrind_dying(void)
+{
+}