Implement semaphore functions.


git-svn-id: svn://svn.valgrind.org/valgrind/trunk@295 a5019735-40e9-0310-863c-91ae7b9d1cf9
diff --git a/coregrind/arch/x86-linux/vg_libpthread.c b/coregrind/arch/x86-linux/vg_libpthread.c
index c9956cc..960d301 100644
--- a/coregrind/arch/x86-linux/vg_libpthread.c
+++ b/coregrind/arch/x86-linux/vg_libpthread.c
@@ -1542,6 +1542,153 @@
 
 
 /* ---------------------------------------------------------------------
+   Hacky implementation of semaphores.
+   ------------------------------------------------------------------ */
+
+#include <semaphore.h>
+
+/* This is a terrible way to do the remapping.  Plan is to import an
+   AVL tree at some point. */
+#define VG_N_SEMAPHORES 50
+
+typedef
+   struct {
+      pthread_mutex_t se_mx;
+      pthread_cond_t se_cv;
+      int count;
+   }
+   vg_sem_t;
+
+static pthread_mutex_t se_remap_mx = PTHREAD_MUTEX_INITIALIZER;
+
+static int      se_remap_used = 0;
+static sem_t*   se_remap_orig[VG_N_SEMAPHORES];
+static vg_sem_t se_remap_new[VG_N_SEMAPHORES];
+
+static vg_sem_t* se_remap ( sem_t* orig )
+{
+   int res, i;
+   res = __pthread_mutex_lock(&se_remap_mx);
+   assert(res == 0);
+
+   for (i = 0; i < se_remap_used; i++) {
+      if (se_remap_orig[i] == orig)
+         break;
+   }
+   if (i == se_remap_used) {
+      if (se_remap_used == VG_N_SEMAPHORES) {
+         res = __pthread_mutex_unlock(&se_remap_mx);
+         assert(res == 0);
+         barf("VG_N_SEMAPHORES is too low.  Increase and recompile.");
+      }
+      se_remap_used++;
+      se_remap_orig[i] = orig;
+      /* printf("allocated semaphore %d\n", i); */
+   }
+   res = __pthread_mutex_unlock(&se_remap_mx);
+   assert(res == 0);
+   return &se_remap_new[i];
+}
+
+
+int sem_init(sem_t *sem, int pshared, unsigned int value)
+{
+   int       res;
+   vg_sem_t* vg_sem;
+   ensure_valgrind("sem_init");
+   if (pshared != 0) {
+      errno = ENOSYS;
+      return -1;
+   }
+   vg_sem = se_remap(sem);
+   res = pthread_mutex_init(&vg_sem->se_mx, NULL);
+   assert(res == 0);
+   res = pthread_cond_init(&vg_sem->se_cv, NULL);
+   assert(res == 0);
+   vg_sem->count = value;
+   return 0;
+}
+
+
+int sem_wait ( sem_t* sem ) 
+{
+   int       res;
+   vg_sem_t* vg_sem;
+   ensure_valgrind("sem_wait");
+   vg_sem = se_remap(sem);
+   res = __pthread_mutex_lock(&vg_sem->se_mx);
+   assert(res == 0);
+   while (vg_sem->count == 0) {
+      res = pthread_cond_wait(&vg_sem->se_cv, &vg_sem->se_mx);
+      assert(res == 0);
+   }
+   vg_sem->count--;
+   res = __pthread_mutex_unlock(&vg_sem->se_mx);
+   assert(res == 0);
+   return 0;
+}
+
+int sem_post ( sem_t* sem ) 
+{
+   int       res;
+   vg_sem_t* vg_sem; 
+   ensure_valgrind("sem_post");
+   vg_sem = se_remap(sem);
+   res = __pthread_mutex_lock(&vg_sem->se_mx);
+   assert(res == 0);
+   if (vg_sem->count == 0) {
+      vg_sem->count++;
+      res = pthread_cond_broadcast(&vg_sem->se_cv);
+      assert(res == 0);
+   } else {
+      vg_sem->count++;
+   }
+   res = __pthread_mutex_unlock(&vg_sem->se_mx);
+   assert(res == 0);
+   return 0;
+}
+
+
+int sem_trywait ( sem_t* sem ) 
+{
+   int       ret, res;
+   vg_sem_t* vg_sem; 
+   ensure_valgrind("sem_trywait");
+   vg_sem = se_remap(sem);
+   res = __pthread_mutex_lock(&vg_sem->se_mx);
+   assert(res == 0);
+   if (vg_sem->count > 0) { 
+      vg_sem->count--; 
+      ret = 0; 
+   } else { 
+      ret = -1; 
+      errno = EAGAIN; 
+   }
+   res = __pthread_mutex_unlock(&vg_sem->se_mx);
+   assert(res == 0);
+   return ret;
+}
+
+
+int sem_getvalue(sem_t* sem, int * sval)
+{
+   vg_sem_t* vg_sem; 
+   ensure_valgrind("sem_getvalue");
+   vg_sem = se_remap(sem);
+   *sval = vg_sem->count;
+   return 0;
+}
+
+
+int sem_destroy(sem_t * sem)
+{
+   kludged("sem_destroy");
+   /* if someone waiting on this semaphore, errno = EBUSY, return -1 */
+   return 0;
+}
+
+
+/* ---------------------------------------------------------------------
    B'stard.
    ------------------------------------------------------------------ */
 
diff --git a/coregrind/docs/manual.html b/coregrind/docs/manual.html
index 607b55b..8007a51 100644
--- a/coregrind/docs/manual.html
+++ b/coregrind/docs/manual.html
@@ -26,7 +26,7 @@
 <a name="title">&nbsp;</a>
 <h1 align=center>Valgrind, snapshot 20020516</h1>
 <center>This manual was majorly updated on 20020501</center>
-<center>This manual was minorly updated on 20020516</center>
+<center>This manual was minorly updated on 20020518</center>
 <p>
 
 <center>
@@ -1206,8 +1206,8 @@
 
 As of late April 02, Valgrind supports programs which use POSIX
 pthreads.  Doing this has proved technically challenging and is still
-in progress, but it works well enough, as of 1 May 02, for significant 
-threaded applications to work.
+in progress, but it works well enough, for significant threaded
+applications to work.
 <p>
 It works as follows: threaded apps are (dynamically) linked against
 <code>libpthread.so</code>.  Usually this is the one installed with
@@ -1229,13 +1229,14 @@
 if you have some kind of concurrency, critical race, locking, or
 similar, bugs.
 <p>
-The current (1 May 02) state of pthread support is as follows.  Please
+The current (18 May 02) state of pthread support is as follows.  Please
 note that things are advancing rapidly, so the situation may have
 improved by the time you read this -- check the web site for further
 updates.
 <ul>
-<li>Mutexes, condition variables, thread-specific data and
-    <code>pthread_once</code> currently work.
+<li>Mutexes, condition variables, thread-specific data,
+    <code>pthread_once</code> and basic semaphore functions
+    (<code>sem_*</code>) currently work.
 <p>
 <li>Various attribute-like calls are handled but ignored.  
     You get a warning message.
@@ -1245,29 +1246,27 @@
     thread without giving it any chance to clean up.  Also, when a
     thread exits, it does not run any cleanup handlers.
 <p>
+<li>Other omissions are: the detachedness state of threads is ignored.
+    This means detached threads hang around and clog up scheduler
+    slots forever when they finish.  Calls for reader-writer locks
+    have dummy stubs with no functionality right now.  You get a
+    warning message.
+<p>
 <li>Currently the following syscalls are thread-safe (nonblocking):
     <code>write</code> <code>read</code> <code>nanosleep</code>
     <code>sleep</code> <code>select</code> and <code>poll</code>.
 <p>
 <li>The POSIX requirement that each thread have its own
-    signal-blocking mask is not done; the signal handling mechanism is
-    thread-unaware and all signals are delivered to the main thread,
-    antidisirregardless.
+    signal-blocking mask is now implemented.
+    <code>pthread_sigmask</code>, <code>pthread_kill</code>,
+    <code>pthread_sigwait</code> and <code>raise</code> should all now
+    work as POSIX requires.
 </ul>
 
 
-As of 1 May 02, the following programs now work fine on my RedHat 7.2
-box: Opera 6.0Beta2, KNode in KDE 3.0, Mozilla-0.9.2.1 and
-Galeon-0.11.3, both as supplied with RedHat 7.2.
-<p>
-Mozilla 1.0RC1 works fine too, provided that you patch it as described
-here: <a href="http://bugzilla.mozilla.org/show_bug.cgi?id=124335">
-http://bugzilla.mozilla.org/show_bug.cgi?id=124335</a>.  This fixes a
-bug in Mozilla which assumes that memory returned from
-<code>malloc</code> is 8-aligned.  Valgrind's allocator only
-guarantees 4-alignment, so without the patch Mozilla makes an illegal
-memory access, which Valgrind of course spots, and then bombs.
-Mozilla 1.0RC2 works fine out-of-the-box.
+As of 18 May 02, the following threaded programs now work fine on my
+RedHat 7.2 box: Opera 6.0Beta2, KNode in KDE 3.0, Mozilla-0.9.2.1 and
+Galeon-0.11.3, the latter two as supplied with RedHat 7.2.  Also Mozilla 1.0RC2.
 
 
 <a name="install"></a>
diff --git a/coregrind/vg_libpthread.c b/coregrind/vg_libpthread.c
index c9956cc..960d301 100644
--- a/coregrind/vg_libpthread.c
+++ b/coregrind/vg_libpthread.c
@@ -1542,6 +1542,153 @@
 
 
 /* ---------------------------------------------------------------------
+   Hacky implementation of semaphores.
+   ------------------------------------------------------------------ */
+
+#include <semaphore.h>
+
+/* This is a terrible way to do the remapping.  Plan is to import an
+   AVL tree at some point. */
+#define VG_N_SEMAPHORES 50
+
+typedef
+   struct {
+      pthread_mutex_t se_mx;
+      pthread_cond_t se_cv;
+      int count;
+   }
+   vg_sem_t;
+
+static pthread_mutex_t se_remap_mx = PTHREAD_MUTEX_INITIALIZER;
+
+static int      se_remap_used = 0;
+static sem_t*   se_remap_orig[VG_N_SEMAPHORES];
+static vg_sem_t se_remap_new[VG_N_SEMAPHORES];
+
+static vg_sem_t* se_remap ( sem_t* orig )
+{
+   int res, i;
+   res = __pthread_mutex_lock(&se_remap_mx);
+   assert(res == 0);
+
+   for (i = 0; i < se_remap_used; i++) {
+      if (se_remap_orig[i] == orig)
+         break;
+   }
+   if (i == se_remap_used) {
+      if (se_remap_used == VG_N_SEMAPHORES) {
+         res = __pthread_mutex_unlock(&se_remap_mx);
+         assert(res == 0);
+         barf("VG_N_SEMAPHORES is too low.  Increase and recompile.");
+      }
+      se_remap_used++;
+      se_remap_orig[i] = orig;
+      /* printf("allocated semaphore %d\n", i); */
+   }
+   res = __pthread_mutex_unlock(&se_remap_mx);
+   assert(res == 0);
+   return &se_remap_new[i];
+}
+
+
+int sem_init(sem_t *sem, int pshared, unsigned int value)
+{
+   int       res;
+   vg_sem_t* vg_sem;
+   ensure_valgrind("sem_init");
+   if (pshared != 0) {
+      errno = ENOSYS;
+      return -1;
+   }
+   vg_sem = se_remap(sem);
+   res = pthread_mutex_init(&vg_sem->se_mx, NULL);
+   assert(res == 0);
+   res = pthread_cond_init(&vg_sem->se_cv, NULL);
+   assert(res == 0);
+   vg_sem->count = value;
+   return 0;
+}
+
+
+int sem_wait ( sem_t* sem ) 
+{
+   int       res;
+   vg_sem_t* vg_sem;
+   ensure_valgrind("sem_wait");
+   vg_sem = se_remap(sem);
+   res = __pthread_mutex_lock(&vg_sem->se_mx);
+   assert(res == 0);
+   while (vg_sem->count == 0) {
+      res = pthread_cond_wait(&vg_sem->se_cv, &vg_sem->se_mx);
+      assert(res == 0);
+   }
+   vg_sem->count--;
+   res = __pthread_mutex_unlock(&vg_sem->se_mx);
+   assert(res == 0);
+   return 0;
+}
+
+int sem_post ( sem_t* sem ) 
+{
+   int       res;
+   vg_sem_t* vg_sem; 
+   ensure_valgrind("sem_post");
+   vg_sem = se_remap(sem);
+   res = __pthread_mutex_lock(&vg_sem->se_mx);
+   assert(res == 0);
+   if (vg_sem->count == 0) {
+      vg_sem->count++;
+      res = pthread_cond_broadcast(&vg_sem->se_cv);
+      assert(res == 0);
+   } else {
+      vg_sem->count++;
+   }
+   res = __pthread_mutex_unlock(&vg_sem->se_mx);
+   assert(res == 0);
+   return 0;
+}
+
+
+int sem_trywait ( sem_t* sem ) 
+{
+   int       ret, res;
+   vg_sem_t* vg_sem; 
+   ensure_valgrind("sem_trywait");
+   vg_sem = se_remap(sem);
+   res = __pthread_mutex_lock(&vg_sem->se_mx);
+   assert(res == 0);
+   if (vg_sem->count > 0) { 
+      vg_sem->count--; 
+      ret = 0; 
+   } else { 
+      ret = -1; 
+      errno = EAGAIN; 
+   }
+   res = __pthread_mutex_unlock(&vg_sem->se_mx);
+   assert(res == 0);
+   return ret;
+}
+
+
+int sem_getvalue(sem_t* sem, int * sval)
+{
+   vg_sem_t* vg_sem; 
+   ensure_valgrind("sem_getvalue");
+   vg_sem = se_remap(sem);
+   *sval = vg_sem->count;
+   return 0;
+}
+
+
+int sem_destroy(sem_t * sem)
+{
+   kludged("sem_destroy");
+   /* if someone waiting on this semaphore, errno = EBUSY, return -1 */
+   return 0;
+}
+
+
+/* ---------------------------------------------------------------------
    B'stard.
    ------------------------------------------------------------------ */