/*
 * Copyright 2001 Sun Microsystems, Inc.  All Rights Reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Sun designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Sun in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 */

#include <stdio.h>
#include <stdlib.h>
#include <signal.h>
#include <pthread.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/resource.h>
#include <sys/uio.h>
#include <unistd.h>
#include <errno.h>

#include <sys/poll.h>

/*
 * Stack allocated by a thread when doing a blocking operation
 */
typedef struct threadEntry {
    pthread_t thr;                      /* this thread */
    struct threadEntry *next;           /* next thread */
    int intr;                           /* interrupted */
} threadEntry_t;

/*
 * Heap allocated during initialization - one entry per fd
 */
typedef struct {
    pthread_mutex_t lock;               /* fd lock */
    threadEntry_t *threads;             /* threads blocked on fd */
} fdEntry_t;

/*
 * Signal to unblock thread
 */
static int sigWakeup = (__SIGRTMAX - 2);

/*
 * The fd table and the number of file descriptors
 */
static fdEntry_t *fdTable;
static int fdCount;

/*
 * Null signal handler
 */
static void sig_wakeup(int sig) {
}

/*
 * Initialization routine (executed when the library is loaded).
 * Allocates the fd table and sets up the signal handler.
 */
static void __attribute__((constructor)) init() {
    struct rlimit nbr_files;
    sigset_t sigset;
    struct sigaction sa;

    /*
     * Allocate table based on the maximum number of
     * file descriptors.
     */
    getrlimit(RLIMIT_NOFILE, &nbr_files);
    fdCount = nbr_files.rlim_max;
    fdTable = (fdEntry_t *)calloc(fdCount, sizeof(fdEntry_t));
    if (fdTable == NULL) {
        fprintf(stderr, "library initialization failed - "
                "unable to allocate file descriptor table - out of memory");
        abort();
    }

    /*
     * Set up the signal handler
     */
    sa.sa_handler = sig_wakeup;
    sa.sa_flags = 0;
    sigemptyset(&sa.sa_mask);
    sigaction(sigWakeup, &sa, NULL);

    sigemptyset(&sigset);
    sigaddset(&sigset, sigWakeup);
    sigprocmask(SIG_UNBLOCK, &sigset, NULL);
}

/*
 * Return the fd table entry for this fd, or NULL if fd is out
 * of range.
 */
static inline fdEntry_t *getFdEntry(int fd)
{
    if (fd < 0 || fd >= fdCount) {
        return NULL;
    }
    return &fdTable[fd];
}

/*
 * Start a blocking operation :-
 *    Insert thread onto thread list for the fd.
 */
static inline void startOp(fdEntry_t *fdEntry, threadEntry_t *self)
{
    self->thr = pthread_self();
    self->intr = 0;

    pthread_mutex_lock(&(fdEntry->lock));
    {
        self->next = fdEntry->threads;
        fdEntry->threads = self;
    }
    pthread_mutex_unlock(&(fdEntry->lock));
}

/*
 * End a blocking operation :-
 *     Remove thread from thread list for the fd.
 *     If fd has been interrupted then set errno to EBADF.
 */
static inline void endOp(fdEntry_t *fdEntry, threadEntry_t *self)
{
    int orig_errno = errno;
    pthread_mutex_lock(&(fdEntry->lock));
    {
        threadEntry_t *curr, *prev = NULL;
        curr = fdEntry->threads;
        while (curr != NULL) {
            if (curr == self) {
                if (curr->intr) {
                    orig_errno = EBADF;
                }
                if (prev == NULL) {
                    fdEntry->threads = curr->next;
                } else {
                    prev->next = curr->next;
                }
                break;
            }
            prev = curr;
            curr = curr->next;
        }
    }
    pthread_mutex_unlock(&(fdEntry->lock));
    errno = orig_errno;
}

/*
 * Close or dup2 a file descriptor, ensuring that all threads blocked on
 * the file descriptor are notified via a wakeup signal.
 *
 *      fd1 < 0    => close(fd2)
 *      fd1 >= 0   => dup2(fd1, fd2)
 *
 * Returns -1 with errno set if the operation fails.
 */
static int closefd(int fd1, int fd2) {
    int rv, orig_errno;
    fdEntry_t *fdEntry = getFdEntry(fd2);
    if (fdEntry == NULL) {
        errno = EBADF;
        return -1;
    }

    /*
     * Lock the fd to hold off additional I/O on this fd.
     */
    pthread_mutex_lock(&(fdEntry->lock));

    {
        /*
         * Send a wakeup signal to all threads blocked on this
         * file descriptor.
         */
        threadEntry_t *curr = fdEntry->threads;
        while (curr != NULL) {
            curr->intr = 1;
            pthread_kill(curr->thr, sigWakeup);
            curr = curr->next;
        }

        /*
         * And close/dup the file descriptor
         * (restart if interrupted by signal)
         */
        do {
            if (fd1 < 0) {
                rv = close(fd2);
            } else {
                rv = dup2(fd1, fd2);
            }
        } while (rv == -1 && errno == EINTR);
    }

    /*
     * Unlock without destroying errno
     */
    orig_errno = errno;
    pthread_mutex_unlock(&(fdEntry->lock));
    errno = orig_errno;

    return rv;
}

/*
 * Wrapper for dup2 - same semantics as the dup2 system call except
 * that any threads blocked in an I/O system call on fd2 will be
 * preempted and return -1/EBADF.
 */
int NET_Dup2(int fd, int fd2) {
    if (fd < 0) {
        errno = EBADF;
        return -1;
    }
    return closefd(fd, fd2);
}

/*
 * Wrapper for close - same semantics as the close system call
 * except that any threads blocked in an I/O operation on fd will be
 * preempted and the I/O system call will return -1/EBADF.
 */
int NET_SocketClose(int fd) {
    return closefd(-1, fd);
}
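
/*
 * A minimal sketch of the close semantics above: one thread blocks in
 * NET_Read on a socket while another calls NET_SocketClose on the same
 * fd, which wakes the reader with -1/EBADF.  Guarded by a hypothetical
 * LINUX_CLOSE_EXAMPLE macro so it is never built into the library; the
 * sleep()-based synchronization is illustrative only.
 */
#ifdef LINUX_CLOSE_EXAMPLE
int NET_Read(int s, void* buf, size_t len);     /* defined below */

static void *example_reader(void *arg) {
    int fd = *(int *)arg;
    char buf[1];
    /* Blocks in recv() until data arrives or the fd is closed.
     * After NET_SocketClose(fd) this returns -1 with errno == EBADF. */
    if (NET_Read(fd, buf, sizeof(buf)) == -1 && errno == EBADF) {
        fprintf(stderr, "reader unblocked by close\n");
    }
    return NULL;
}

static void example_close_unblocks_reader(void) {
    int sv[2];
    pthread_t reader;
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, sv) == -1) {
        return;
    }
    pthread_create(&reader, NULL, example_reader, &sv[0]);
    sleep(1);                   /* give the reader time to block */
    NET_SocketClose(sv[0]);     /* interrupts the blocked NET_Read */
    pthread_join(reader, NULL);
    close(sv[1]);
}
#endif /* LINUX_CLOSE_EXAMPLE */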

/************** Basic I/O operations here ***************/

/*
 * Macro to perform a blocking IO operation. Restarts
 * automatically if interrupted by signal (other than
 * our wakeup signal)
 */
#define BLOCKING_IO_RETURN_INT(FD, FUNC) {      \
    int ret;                                    \
    threadEntry_t self;                         \
    fdEntry_t *fdEntry = getFdEntry(FD);        \
    if (fdEntry == NULL) {                      \
        errno = EBADF;                          \
        return -1;                              \
    }                                           \
    do {                                        \
        startOp(fdEntry, &self);                \
        ret = FUNC;                             \
        endOp(fdEntry, &self);                  \
    } while (ret == -1 && errno == EINTR);      \
    return ret;                                 \
}

int NET_Read(int s, void* buf, size_t len) {
    BLOCKING_IO_RETURN_INT( s, recv(s, buf, len, 0) );
}
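
/*
 * For reference, the macro invocation in NET_Read above expands to a
 * body equivalent to the following: look up the fd entry, register the
 * calling thread around the recv() call, and restart on EINTR unless
 * the fd was interrupted by closefd (in which case endOp has already
 * set errno to EBADF and the loop exits with -1).
 *
 *     int ret;
 *     threadEntry_t self;
 *     fdEntry_t *fdEntry = getFdEntry(s);
 *     if (fdEntry == NULL) {
 *         errno = EBADF;
 *         return -1;
 *     }
 *     do {
 *         startOp(fdEntry, &self);
 *         ret = recv(s, buf, len, 0);
 *         endOp(fdEntry, &self);
 *     } while (ret == -1 && errno == EINTR);
 *     return ret;
 */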

int NET_ReadV(int s, const struct iovec * vector, int count) {
    BLOCKING_IO_RETURN_INT( s, readv(s, vector, count) );
}

int NET_RecvFrom(int s, void *buf, int len, unsigned int flags,
                 struct sockaddr *from, int *fromlen) {
    BLOCKING_IO_RETURN_INT( s, recvfrom(s, buf, len, flags, from, fromlen) );
}

int NET_Send(int s, void *msg, int len, unsigned int flags) {
    BLOCKING_IO_RETURN_INT( s, send(s, msg, len, flags) );
}

int NET_WriteV(int s, const struct iovec * vector, int count) {
    BLOCKING_IO_RETURN_INT( s, writev(s, vector, count) );
}

int NET_SendTo(int s, const void *msg, int len, unsigned int flags,
               const struct sockaddr *to, int tolen) {
    BLOCKING_IO_RETURN_INT( s, sendto(s, msg, len, flags, to, tolen) );
}

int NET_Accept(int s, struct sockaddr *addr, int *addrlen) {
    BLOCKING_IO_RETURN_INT( s, accept(s, addr, addrlen) );
}

int NET_Connect(int s, struct sockaddr *addr, int addrlen) {
    BLOCKING_IO_RETURN_INT( s, connect(s, addr, addrlen) );
}

#ifndef USE_SELECT
int NET_Poll(struct pollfd *ufds, unsigned int nfds, int timeout) {
    BLOCKING_IO_RETURN_INT( ufds[0].fd, poll(ufds, nfds, timeout) );
}
#else
int NET_Select(int s, fd_set *readfds, fd_set *writefds,
               fd_set *exceptfds, struct timeval *timeout) {
    BLOCKING_IO_RETURN_INT( s-1,
                            select(s, readfds, writefds, exceptfds, timeout) );
}
#endif

/*
 * Wrapper for poll(s, timeout).
 * Auto-restarts with an adjusted timeout if interrupted by a
 * signal other than our wakeup signal.
 */
int NET_Timeout(int s, long timeout) {
    long prevtime, newtime;
    struct timeval t;
    fdEntry_t *fdEntry = getFdEntry(s);

    /*
     * Check that fd hasn't been closed.
     */
    if (fdEntry == NULL) {
        errno = EBADF;
        return -1;
    }

    /*
     * Pick up the current time as we may need to adjust the timeout.
     */
    if (timeout > 0) {
        gettimeofday(&t, NULL);
        prevtime = t.tv_sec * 1000 + t.tv_usec / 1000;
    }

    for (;;) {
        struct pollfd pfd;
        int rv;
        threadEntry_t self;

        /*
         * Poll the fd. If interrupted by our wakeup signal
         * errno will be set to EBADF.
         */
        pfd.fd = s;
        pfd.events = POLLIN | POLLERR;

        startOp(fdEntry, &self);
        rv = poll(&pfd, 1, timeout);
        endOp(fdEntry, &self);

        /*
         * If interrupted then adjust the timeout. If the timeout
         * has expired return 0 (indicating the timeout expired).
         */
        if (rv < 0 && errno == EINTR) {
            if (timeout > 0) {
                gettimeofday(&t, NULL);
                newtime = t.tv_sec * 1000 + t.tv_usec / 1000;
                timeout -= newtime - prevtime;
                if (timeout <= 0) {
                    return 0;
                }
                prevtime = newtime;
            }
        } else {
            return rv;
        }
    }
}
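
/*
 * A minimal sketch of how NET_Timeout is typically paired with NET_Read
 * to implement a read with a timeout: wait for readability first, then
 * read.  The example_timed_read helper and the choice of EAGAIN to
 * report a timeout are illustrative assumptions, and the block is
 * guarded by the same hypothetical LINUX_CLOSE_EXAMPLE macro as the
 * earlier example.
 */
#ifdef LINUX_CLOSE_EXAMPLE
static int example_timed_read(int s, void *buf, size_t len, long millis) {
    int rv = NET_Timeout(s, millis);
    if (rv == 0) {
        /* Timed out before the socket became readable. */
        errno = EAGAIN;
        return -1;
    }
    if (rv == -1) {
        /* EBADF if the fd was closed by another thread, or a poll error. */
        return -1;
    }
    /* Socket is readable (or in error); this read should not block long. */
    return NET_Read(s, buf, len);
}
#endif /* LINUX_CLOSE_EXAMPLE */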