/***************************************************************************
 *                                  _   _ ____  _
 *  Project                     ___| | | |  _ \| |
 *                             / __| | | | |_) | |
 *                            | (__| |_| |  _ <| |___
 *                             \___|\___/|_| \_\_____|
 *
 * Copyright (C) 1998 - 2010, Daniel Stenberg, <daniel@haxx.se>, et al.
 *
 * This software is licensed as described in the file COPYING, which
 * you should have received as part of this distribution. The terms
 * are also available at http://curl.haxx.se/docs/copyright.html.
 *
 * You may opt to use, copy, modify, merge, publish, distribute and/or sell
 * copies of the Software, and permit persons to whom the Software is
 * furnished to do so, under the terms of the COPYING file.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ***************************************************************************/
22
23#include "setup.h"
24
25/* -- WIN32 approved -- */
26#include <stdio.h>
27#include <string.h>
28#include <stdarg.h>
29#include <stdlib.h>
30#include <ctype.h>
31#include <errno.h>
32
33#include "strtoofft.h"
34#include "strequal.h"
35#include "rawstr.h"
36
37#ifdef WIN32
38#include <time.h>
39#include <io.h>
40#else
41#ifdef HAVE_SYS_SOCKET_H
42#include <sys/socket.h>
43#endif
44#ifdef HAVE_NETINET_IN_H
45#include <netinet/in.h>
46#endif
47#ifdef HAVE_SYS_TIME_H
48#include <sys/time.h>
49#endif
50#ifdef HAVE_UNISTD_H
51#include <unistd.h>
52#endif
53#ifdef HAVE_NETDB_H
54#include <netdb.h>
55#endif
56#ifdef HAVE_ARPA_INET_H
57#include <arpa/inet.h>
58#endif
59#ifdef HAVE_NET_IF_H
60#include <net/if.h>
61#endif
62#ifdef HAVE_SYS_IOCTL_H
63#include <sys/ioctl.h>
64#endif
65#ifdef HAVE_SIGNAL_H
66#include <signal.h>
67#endif
68
69#ifdef HAVE_SYS_PARAM_H
70#include <sys/param.h>
71#endif
72
73#ifdef HAVE_SYS_SELECT_H
74#include <sys/select.h>
75#endif
76
77#ifndef HAVE_SOCKET
78#error "We can't compile without socket() support!"
79#endif
80
81#endif /* WIN32 */
82
83#include "urldata.h"
84#include <curl/curl.h>
85#include "netrc.h"
86
87#include "content_encoding.h"
88#include "hostip.h"
89#include "transfer.h"
90#include "sendf.h"
91#include "speedcheck.h"
92#include "progress.h"
93#include "http.h"
94#include "url.h"
95#include "getinfo.h"
96#include "sslgen.h"
97#include "http_digest.h"
98#include "http_ntlm.h"
99#include "http_negotiate.h"
100#include "share.h"
101#include "curl_memory.h"
102#include "select.h"
103#include "multiif.h"
104#include "easyif.h" /* for Curl_convert_to_network prototype */
105#include "rtsp.h"
106
107#define _MPRINTF_REPLACE /* use our functions only */
108#include <curl/mprintf.h>
109
110/* The last #include file should be: */
111#include "memdebug.h"
112
113#define CURL_TIMEOUT_EXPECT_100 1000 /* counting ms here */
114
115/*
116 * This function will call the read callback to fill our buffer with data
117 * to upload.
118 */
119CURLcode Curl_fillreadbuffer(struct connectdata *conn, int bytes, int *nreadp)
120{
121 struct SessionHandle *data = conn->data;
122 size_t buffersize = (size_t)bytes;
123 int nread;
124#ifdef CURL_DOES_CONVERSIONS
125 bool sending_http_headers = FALSE;
126
127 if((conn->protocol&(PROT_HTTP|PROT_RTSP)) &&
128 (data->state.proto.http->sending == HTTPSEND_REQUEST)) {
129 /* We're sending the HTTP request headers, not the data.
130 Remember that so we don't re-translate them into garbage. */
131 sending_http_headers = TRUE;
132 }
133#endif
134
135 if(data->req.upload_chunky) {
136 /* if chunked Transfer-Encoding */
137 buffersize -= (8 + 2 + 2); /* 32bit hex + CRLF + CRLF */
138 data->req.upload_fromhere += (8 + 2); /* 32bit hex + CRLF */
139 }
140
141 /* this function returns a size_t, so we typecast to int to prevent warnings
142 with picky compilers */
143 nread = (int)conn->fread_func(data->req.upload_fromhere, 1,
144 buffersize, conn->fread_in);
145
146 if(nread == CURL_READFUNC_ABORT) {
147 failf(data, "operation aborted by callback");
148 *nreadp = 0;
149 return CURLE_ABORTED_BY_CALLBACK;
150 }
151 else if(nread == CURL_READFUNC_PAUSE) {
152 struct SingleRequest *k = &data->req;
153 /* CURL_READFUNC_PAUSE pauses read callbacks that feed socket writes */
154 k->keepon |= KEEP_SEND_PAUSE; /* mark socket send as paused */
155 if(data->req.upload_chunky) {
156 /* Back out the preallocation done above */
157 data->req.upload_fromhere -= (8 + 2);
158 }
159 *nreadp = 0;
160 return CURLE_OK; /* nothing was read */
161 }
162 else if((size_t)nread > buffersize) {
163 /* the read function returned a too large value */
164 *nreadp = 0;
165 failf(data, "read function returned funny value");
166 return CURLE_READ_ERROR;
167 }
168
169 if(!data->req.forbidchunk && data->req.upload_chunky) {
170 /* if chunked Transfer-Encoding
171 * build chunk:
172 *
173 * <HEX SIZE> CRLF
174 * <DATA> CRLF
175 */
176 /* On non-ASCII platforms the <DATA> may or may not be
177 translated based on set.prefer_ascii while the protocol
178 portion must always be translated to the network encoding.
179 To further complicate matters, line end conversion might be
180 done later on, so we need to prevent CRLFs from becoming
181 CRCRLFs if that's the case. To do this we use bare LFs
182 here, knowing they'll become CRLFs later on.
183 */
184
185 char hexbuffer[11];
186 const char *endofline_native;
187 const char *endofline_network;
188 int hexlen;
189#ifdef CURL_DO_LINEEND_CONV
190 if((data->set.crlf) || (data->set.prefer_ascii)) {
191#else
192 if(data->set.crlf) {
193#endif /* CURL_DO_LINEEND_CONV */
194 /* \n will become \r\n later on */
195 endofline_native = "\n";
196 endofline_network = "\x0a";
197 } else {
198 endofline_native = "\r\n";
199 endofline_network = "\x0d\x0a";
200 }
201 hexlen = snprintf(hexbuffer, sizeof(hexbuffer),
202 "%x%s", nread, endofline_native);
203
204 /* move buffer pointer */
205 data->req.upload_fromhere -= hexlen;
206 nread += hexlen;
207
208 /* copy the prefix to the buffer, leaving out the NUL */
209 memcpy(data->req.upload_fromhere, hexbuffer, hexlen);
210
211 /* always append ASCII CRLF to the data */
212 memcpy(data->req.upload_fromhere + nread,
213 endofline_network,
214 strlen(endofline_network));
215
216#ifdef CURL_DOES_CONVERSIONS
217 CURLcode res;
218 int length;
219 if(data->set.prefer_ascii) {
220 /* translate the protocol and data */
221 length = nread;
222 } else {
223 /* just translate the protocol portion */
224 length = strlen(hexbuffer);
225 }
226 res = Curl_convert_to_network(data, data->req.upload_fromhere, length);
227 /* Curl_convert_to_network calls failf if unsuccessful */
228 if(res != CURLE_OK) {
229 return(res);
230 }
231#endif /* CURL_DOES_CONVERSIONS */
232
233 if((nread - hexlen) == 0) {
234 /* mark this as done once this chunk is transfered */
235 data->req.upload_done = TRUE;
236 }
237
238 nread+=(int)strlen(endofline_native); /* for the added end of line */
239 }
240#ifdef CURL_DOES_CONVERSIONS
241 else if((data->set.prefer_ascii) && (!sending_http_headers)) {
242 CURLcode res;
243 res = Curl_convert_to_network(data, data->req.upload_fromhere, nread);
244 /* Curl_convert_to_network calls failf if unsuccessful */
245 if(res != CURLE_OK)
246 return(res);
247 }
248#endif /* CURL_DOES_CONVERSIONS */
249
250 *nreadp = nread;
251
252 return CURLE_OK;
253}
254
255
256/*
257 * Curl_readrewind() rewinds the read stream. This is typically used for HTTP
258 * POST/PUT with multi-pass authentication when a sending was denied and a
259 * resend is necessary.
260 */
261CURLcode Curl_readrewind(struct connectdata *conn)
262{
263 struct SessionHandle *data = conn->data;
264
265 conn->bits.rewindaftersend = FALSE; /* we rewind now */
266
267 /* explicitly switch off sending data on this connection now since we are
268 about to restart a new transfer and thus we want to avoid inadvertently
269 sending more data on the existing connection until the next transfer
270 starts */
271 data->req.keepon &= ~KEEP_SEND;
272
273 /* We have sent away data. If not using CURLOPT_POSTFIELDS or
274 CURLOPT_HTTPPOST, call app to rewind
275 */
276 if(data->set.postfields ||
277 (data->set.httpreq == HTTPREQ_POST_FORM))
278 ; /* do nothing */
279 else {
280 if(data->set.seek_func) {
281 int err;
282
283 err = (data->set.seek_func)(data->set.seek_client, 0, SEEK_SET);
284 if(err) {
285 failf(data, "seek callback returned error %d", (int)err);
286 return CURLE_SEND_FAIL_REWIND;
287 }
288 }
289 else if(data->set.ioctl_func) {
290 curlioerr err;
291
292 err = (data->set.ioctl_func)(data, CURLIOCMD_RESTARTREAD,
293 data->set.ioctl_client);
294 infof(data, "the ioctl callback returned %d\n", (int)err);
295
296 if(err) {
297 /* FIXME: convert to a human readable error message */
298 failf(data, "ioctl callback returned error %d", (int)err);
299 return CURLE_SEND_FAIL_REWIND;
300 }
301 }
302 else {
303 /* If no CURLOPT_READFUNCTION is used, we know that we operate on a
304 given FILE * stream and we can actually attempt to rewind that
305 ourself with fseek() */
306 if(data->set.fread_func == (curl_read_callback)fread) {
307 if(-1 != fseek(data->set.in, 0, SEEK_SET))
308 /* successful rewind */
309 return CURLE_OK;
310 }
311
312 /* no callback set or failure above, makes us fail at once */
313 failf(data, "necessary data rewind wasn't possible");
314 return CURLE_SEND_FAIL_REWIND;
315 }
316 }
317 return CURLE_OK;
318}
319
320static int data_pending(const struct connectdata *conn)
321{
322 /* in the case of libssh2, we can never be really sure that we have emptied
323 its internal buffers so we MUST always try until we get EAGAIN back */
324 return conn->protocol&(PROT_SCP|PROT_SFTP) ||
325 Curl_ssl_data_pending(conn, FIRSTSOCKET);
326}
327
328static void read_rewind(struct connectdata *conn,
329 size_t thismuch)
330{
331 DEBUGASSERT(conn->read_pos >= thismuch);
332
333 conn->read_pos -= thismuch;
334 conn->bits.stream_was_rewound = TRUE;
335
336#ifdef DEBUGBUILD
337 {
338 char buf[512 + 1];
339 size_t show;
340
341 show = CURLMIN(conn->buf_len - conn->read_pos, sizeof(buf)-1);
342 if(conn->master_buffer) {
343 memcpy(buf, conn->master_buffer + conn->read_pos, show);
344 buf[show] = '\0';
345 }
346 else {
347 buf[0] = '\0';
348 }
349
350 DEBUGF(infof(conn->data,
351 "Buffer after stream rewind (read_pos = %zu): [%s]",
352 conn->read_pos, buf));
353 }
354#endif
355}
356
357
358/*
359 * Go ahead and do a read if we have a readable socket or if
360 * the stream was rewound (in which case we have data in a
361 * buffer)
362 */
363static CURLcode readwrite_data(struct SessionHandle *data,
364 struct connectdata *conn,
365 struct SingleRequest *k,
366 int *didwhat, bool *done)
367{
368 CURLcode result = CURLE_OK;
369 ssize_t nread; /* number of bytes read */
370 size_t excess = 0; /* excess bytes read */
371 bool is_empty_data = FALSE;
372#ifndef CURL_DISABLE_RTSP
373 bool readmore = FALSE; /* used by RTP to signal for more data */
374#endif
375
376 *done = FALSE;
377
378 /* This is where we loop until we have read everything there is to
379 read or we get a EWOULDBLOCK */
380 do {
381 size_t buffersize = data->set.buffer_size?
382 data->set.buffer_size : BUFSIZE;
383 size_t bytestoread = buffersize;
384 int readrc;
385
386 if(k->size != -1 && !k->header) {
387 /* make sure we don't read "too much" if we can help it since we
388 might be pipelining and then someone else might want to read what
389 follows! */
390 curl_off_t totalleft = k->size - k->bytecount;
391 if(totalleft < (curl_off_t)bytestoread)
392 bytestoread = (size_t)totalleft;
393 }
394
395 if(bytestoread) {
396 /* receive data from the network! */
397 readrc = Curl_read(conn, conn->sockfd, k->buf, bytestoread, &nread);
398
399 /* subzero, this would've blocked */
400 if(0 > readrc)
401 break; /* get out of loop */
402
403 /* get the CURLcode from the int */
404 result = (CURLcode)readrc;
405
406 if(result>0)
407 return result;
408 }
409 else {
410 /* read nothing but since we wanted nothing we consider this an OK
411 situation to proceed from */
412 nread = 0;
413 }
414
415 if((k->bytecount == 0) && (k->writebytecount == 0)) {
416 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
417 if(k->exp100 > EXP100_SEND_DATA)
418 /* set time stamp to compare with when waiting for the 100 */
419 k->start100 = Curl_tvnow();
420 }
421
422 *didwhat |= KEEP_RECV;
423 /* indicates data of zero size, i.e. empty file */
424 is_empty_data = (bool)((nread == 0) && (k->bodywrites == 0));
425
426 /* NUL terminate, allowing string ops to be used */
427 if(0 < nread || is_empty_data) {
428 k->buf[nread] = 0;
429 }
430 else if(0 >= nread) {
431 /* if we receive 0 or less here, the server closed the connection
432 and we bail out from this! */
433 DEBUGF(infof(data, "nread <= 0, server closed connection, bailing\n"));
434 k->keepon &= ~KEEP_RECV;
435 break;
436 }
437
438 /* Default buffer to use when we write the buffer, it may be changed
439 in the flow below before the actual storing is done. */
440 k->str = k->buf;
441
442#ifndef CURL_DISABLE_RTSP
443 /* Check for RTP at the beginning of the data */
444 if(conn->protocol & PROT_RTSP) {
445 result = Curl_rtsp_rtp_readwrite(data, conn, &nread, &readmore);
446 if(result)
447 return result;
448 if(readmore)
449 break;
450 }
451#endif
452
453#ifndef CURL_DISABLE_HTTP
454 /* Since this is a two-state thing, we check if we are parsing
455 headers at the moment or not. */
456 if(k->header) {
457 /* we are in parse-the-header-mode */
458 bool stop_reading = FALSE;
459 result = Curl_http_readwrite_headers(data, conn, &nread, &stop_reading);
460 if(result)
461 return result;
462
463#ifndef CURL_DISABLE_RTSP
464 /* Check for RTP after the headers if there is no Content */
465 if(k->maxdownload <= 0 && nread > 0 && (conn->protocol & PROT_RTSP)) {
466 result = Curl_rtsp_rtp_readwrite(data, conn, &nread, &readmore);
467 if(result)
468 return result;
469 if(readmore)
470 break;
471 }
472#endif
473
474 if(stop_reading)
475 /* We've stopped dealing with input, get out of the do-while loop */
476 break;
477 }
478#endif /* CURL_DISABLE_HTTP */
479
480
481 /* This is not an 'else if' since it may be a rest from the header
482 parsing, where the beginning of the buffer is headers and the end
483 is non-headers. */
484 if(k->str && !k->header && (nread > 0 || is_empty_data)) {
485
486
487#ifndef CURL_DISABLE_HTTP
488 if(0 == k->bodywrites && !is_empty_data) {
489 /* These checks are only made the first time we are about to
490 write a piece of the body */
491 if(conn->protocol&(PROT_HTTP|PROT_RTSP)) {
492 /* HTTP-only checks */
493
494 if(data->req.newurl) {
495 if(conn->bits.close) {
496 /* Abort after the headers if "follow Location" is set
497 and we're set to close anyway. */
498 k->keepon &= ~KEEP_RECV;
499 *done = TRUE;
500 return CURLE_OK;
501 }
502 /* We have a new url to load, but since we want to be able
503 to re-use this connection properly, we read the full
504 response in "ignore more" */
505 k->ignorebody = TRUE;
506 infof(data, "Ignoring the response-body\n");
507 }
508 if(data->state.resume_from && !k->content_range &&
509 (data->set.httpreq==HTTPREQ_GET) &&
510 !k->ignorebody) {
511 /* we wanted to resume a download, although the server doesn't
512 * seem to support this and we did this with a GET (if it
513 * wasn't a GET we did a POST or PUT resume) */
514 failf(data, "HTTP server doesn't seem to support "
515 "byte ranges. Cannot resume.");
516 return CURLE_RANGE_ERROR;
517 }
518
519 if(data->set.timecondition && !data->state.range) {
520 /* A time condition has been set AND no ranges have been
521 requested. This seems to be what chapter 13.3.4 of
522 RFC 2616 defines to be the correct action for a
523 HTTP/1.1 client */
524 if((k->timeofdoc > 0) && (data->set.timevalue > 0)) {
525 switch(data->set.timecondition) {
526 case CURL_TIMECOND_IFMODSINCE:
527 default:
528 if(k->timeofdoc < data->set.timevalue) {
529 infof(data,
530 "The requested document is not new enough\n");
531 *done = TRUE;
532 data->info.timecond = TRUE;
533 return CURLE_OK;
534 }
535 break;
536 case CURL_TIMECOND_IFUNMODSINCE:
537 if(k->timeofdoc > data->set.timevalue) {
538 infof(data,
539 "The requested document is not old enough\n");
540 *done = TRUE;
541 data->info.timecond = TRUE;
542 return CURLE_OK;
543 }
544 break;
545 } /* switch */
546 } /* two valid time strings */
547 } /* we have a time condition */
548
549 } /* this is HTTP */
550 } /* this is the first time we write a body part */
551#endif /* CURL_DISABLE_HTTP */
552 k->bodywrites++;
553
554 /* pass data to the debug function before it gets "dechunked" */
555 if(data->set.verbose) {
556 if(k->badheader) {
557 Curl_debug(data, CURLINFO_DATA_IN, data->state.headerbuff,
558 (size_t)k->hbuflen, conn);
559 if(k->badheader == HEADER_PARTHEADER)
560 Curl_debug(data, CURLINFO_DATA_IN,
561 k->str, (size_t)nread, conn);
562 }
563 else
564 Curl_debug(data, CURLINFO_DATA_IN,
565 k->str, (size_t)nread, conn);
566 }
567
568#ifndef CURL_DISABLE_HTTP
569 if(k->chunk) {
570 /*
571 * Here comes a chunked transfer flying and we need to decode this
572 * properly. While the name says read, this function both reads
573 * and writes away the data. The returned 'nread' holds the number
574 * of actual data it wrote to the client.
575 */
576
577 CHUNKcode res =
578 Curl_httpchunk_read(conn, k->str, nread, &nread);
579
580 if(CHUNKE_OK < res) {
581 if(CHUNKE_WRITE_ERROR == res) {
582 failf(data, "Failed writing data");
583 return CURLE_WRITE_ERROR;
584 }
585 failf(data, "Received problem %d in the chunky parser", (int)res);
586 return CURLE_RECV_ERROR;
587 }
588 else if(CHUNKE_STOP == res) {
589 size_t dataleft;
590 /* we're done reading chunks! */
591 k->keepon &= ~KEEP_RECV; /* read no more */
592
593 /* There are now possibly N number of bytes at the end of the
594 str buffer that weren't written to the client.
595
596 We DO care about this data if we are pipelining.
597 Push it back to be read on the next pass. */
598
599 dataleft = conn->chunk.dataleft;
600 if(dataleft != 0) {
601 infof(conn->data, "Leftovers after chunking. "
602 " Rewinding %zu bytes\n",dataleft);
603 read_rewind(conn, dataleft);
604 }
605 }
606 /* If it returned OK, we just keep going */
607 }
608#endif /* CURL_DISABLE_HTTP */
609
610 /* Account for body content stored in the header buffer */
611 if(k->badheader && !k->ignorebody) {
612 DEBUGF(infof(data, "Increasing bytecount by %zu from hbuflen\n",
613 k->hbuflen));
614 k->bytecount += k->hbuflen;
615 }
616
617 if((-1 != k->maxdownload) &&
618 (k->bytecount + nread >= k->maxdownload)) {
619
620 excess = (size_t)(k->bytecount + nread - k->maxdownload);
621 if(excess > 0 && !k->ignorebody) {
622 if(conn->data->multi && Curl_multi_canPipeline(conn->data->multi)) {
623 /* The 'excess' amount below can't be more than BUFSIZE which
624 always will fit in a size_t */
625 infof(data,
626 "Rewinding stream by : %zu"
627 " bytes on url %s (size = %" FORMAT_OFF_T
628 ", maxdownload = %" FORMAT_OFF_T
629 ", bytecount = %" FORMAT_OFF_T ", nread = %zd)\n",
630 excess, data->state.path,
631 k->size, k->maxdownload, k->bytecount, nread);
632 read_rewind(conn, excess);
633 }
634 else {
635 infof(data,
636 "Excess found in a non pipelined read:"
637 " excess = %zu"
638 ", size = %" FORMAT_OFF_T
639 ", maxdownload = %" FORMAT_OFF_T
640 ", bytecount = %" FORMAT_OFF_T "\n",
641 excess, k->size, k->maxdownload, k->bytecount);
642 }
643 }
644
645 nread = (ssize_t) (k->maxdownload - k->bytecount);
646 if(nread < 0 ) /* this should be unusual */
647 nread = 0;
648
649 k->keepon &= ~KEEP_RECV; /* we're done reading */
650 }
651
652 k->bytecount += nread;
653
654 Curl_pgrsSetDownloadCounter(data, k->bytecount);
655
656 if(!k->chunk && (nread || k->badheader || is_empty_data)) {
657 /* If this is chunky transfer, it was already written */
658
659 if(k->badheader && !k->ignorebody) {
660 /* we parsed a piece of data wrongly assuming it was a header
661 and now we output it as body instead */
662
663 /* Don't let excess data pollute body writes */
664 if(k->maxdownload == -1 || (curl_off_t)k->hbuflen <= k->maxdownload)
665 result = Curl_client_write(conn, CLIENTWRITE_BODY,
666 data->state.headerbuff,
667 k->hbuflen);
668 else
669 result = Curl_client_write(conn, CLIENTWRITE_BODY,
670 data->state.headerbuff,
671 (size_t)k->maxdownload);
672
673 if(result)
674 return result;
675 }
676 if(k->badheader < HEADER_ALLBAD) {
677 /* This switch handles various content encodings. If there's an
678 error here, be sure to check over the almost identical code
679 in http_chunks.c.
680 Make sure that ALL_CONTENT_ENCODINGS contains all the
681 encodings handled here. */
682#ifdef HAVE_LIBZ
683 switch (conn->data->set.http_ce_skip ?
684 IDENTITY : k->content_encoding) {
685 case IDENTITY:
686#endif
687 /* This is the default when the server sends no
688 Content-Encoding header. See Curl_readwrite_init; the
689 memset() call initializes k->content_encoding to zero. */
690 if(!k->ignorebody) {
691
692#ifndef CURL_DISABLE_POP3
693 if(conn->protocol&PROT_POP3)
694 result = Curl_pop3_write(conn, k->str, nread);
695 else
696#endif /* CURL_DISABLE_POP3 */
697
698 result = Curl_client_write(conn, CLIENTWRITE_BODY, k->str,
699 nread);
700 }
701#ifdef HAVE_LIBZ
702 break;
703
704 case DEFLATE:
705 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
706 if(!k->ignorebody)
707 result = Curl_unencode_deflate_write(conn, k, nread);
708 break;
709
710 case GZIP:
711 /* Assume CLIENTWRITE_BODY; headers are not encoded. */
712 if(!k->ignorebody)
713 result = Curl_unencode_gzip_write(conn, k, nread);
714 break;
715
716 case COMPRESS:
717 default:
718 failf (data, "Unrecognized content encoding type. "
719 "libcurl understands `identity', `deflate' and `gzip' "
720 "content encodings.");
721 result = CURLE_BAD_CONTENT_ENCODING;
722 break;
723 }
724#endif
725 }
726 k->badheader = HEADER_NORMAL; /* taken care of now */
727
728 if(result)
729 return result;
730 }
731
732 } /* if(! header and data to read ) */
733
734#ifndef CURL_DISABLE_RTSP
735 if(excess > 0 && !conn->bits.stream_was_rewound &&
736 (conn->protocol & PROT_RTSP)) {
737 /* Check for RTP after the content if there is unrewound excess */
738
739 /* Parse the excess data */
740 k->str += nread;
741 nread = excess;
742
743 result = Curl_rtsp_rtp_readwrite(data, conn, &nread, &readmore);
744 if(result)
745 return result;
746
747 if(readmore)
748 k->keepon |= KEEP_RECV; /* we're not done reading */
749 break;
750 }
751#endif
752
753 if(is_empty_data) {
754 /* if we received nothing, the server closed the connection and we
755 are done */
756 k->keepon &= ~KEEP_RECV;
757 }
758
759 } while(data_pending(conn));
760
761 if(((k->keepon & (KEEP_RECV|KEEP_SEND)) == KEEP_SEND) &&
762 conn->bits.close ) {
763 /* When we've read the entire thing and the close bit is set, the server
764 may now close the connection. If there's now any kind of sending going
765 on from our side, we need to stop that immediately. */
766 infof(data, "we are done reading and this is set to close, stop send\n");
767 k->keepon &= ~KEEP_SEND; /* no writing anymore either */
768 }
769
770 return CURLE_OK;
771}
772
773/*
774 * Send data to upload to the server, when the socket is writable.
775 */
776static CURLcode readwrite_upload(struct SessionHandle *data,
777 struct connectdata *conn,
778 struct SingleRequest *k,
779 int *didwhat)
780{
781 ssize_t i, si;
782 ssize_t bytes_written;
783 CURLcode result;
784 ssize_t nread; /* number of bytes read */
785 bool sending_http_headers = FALSE;
786
787 if((k->bytecount == 0) && (k->writebytecount == 0))
788 Curl_pgrsTime(data, TIMER_STARTTRANSFER);
789
790 *didwhat |= KEEP_SEND;
791
792 /*
793 * We loop here to do the READ and SEND loop until we run out of
794 * data to send or until we get EWOULDBLOCK back
795 */
796 do {
797
798 /* only read more data if there's no upload data already
799 present in the upload buffer */
800 if(0 == data->req.upload_present) {
801 /* init the "upload from here" pointer */
802 data->req.upload_fromhere = k->uploadbuf;
803
804 if(!k->upload_done) {
805 /* HTTP pollution, this should be written nicer to become more
806 protocol agnostic. */
807 int fillcount;
808
809 if((k->exp100 == EXP100_SENDING_REQUEST) &&
810 (data->state.proto.http->sending == HTTPSEND_BODY)) {
811 /* If this call is to send body data, we must take some action:
812 We have sent off the full HTTP 1.1 request, and we shall now
813 go into the Expect: 100 state and await such a header */
814 k->exp100 = EXP100_AWAITING_CONTINUE; /* wait for the header */
815 k->keepon &= ~KEEP_SEND; /* disable writing */
816 k->start100 = Curl_tvnow(); /* timeout count starts now */
817 *didwhat &= ~KEEP_SEND; /* we didn't write anything actually */
818 break;
819 }
820
821 if(conn->protocol&(PROT_HTTP|PROT_RTSP)) {
822 if(data->state.proto.http->sending == HTTPSEND_REQUEST)
823 /* We're sending the HTTP request headers, not the data.
824 Remember that so we don't change the line endings. */
825 sending_http_headers = TRUE;
826 else
827 sending_http_headers = FALSE;
828 }
829
830 result = Curl_fillreadbuffer(conn, BUFSIZE, &fillcount);
831 if(result)
832 return result;
833
834 nread = (ssize_t)fillcount;
835 }
836 else
837 nread = 0; /* we're done uploading/reading */
838
839 if(!nread && (k->keepon & KEEP_SEND_PAUSE)) {
840 /* this is a paused transfer */
841 break;
842 }
843 else if(nread<=0) {
844 /* done */
845 k->keepon &= ~KEEP_SEND; /* we're done writing */
846
847 if(conn->bits.rewindaftersend) {
848 result = Curl_readrewind(conn);
849 if(result)
850 return result;
851 }
852 break;
853 }
854
855 /* store number of bytes available for upload */
856 data->req.upload_present = nread;
857
858#ifndef CURL_DISABLE_SMTP
859 if(conn->protocol & PROT_SMTP) {
860 result = Curl_smtp_escape_eob(conn, nread);
861 if(result)
862 return result;
863 }
864 else
865#endif /* CURL_DISABLE_SMTP */
866
867 /* convert LF to CRLF if so asked */
868 if((!sending_http_headers) &&
869#ifdef CURL_DO_LINEEND_CONV
870 /* always convert if we're FTPing in ASCII mode */
871 ((data->set.crlf) || (data->set.prefer_ascii))) {
872#else
873 (data->set.crlf)) {
874#endif
875 if(data->state.scratch == NULL)
876 data->state.scratch = malloc(2*BUFSIZE);
877 if(data->state.scratch == NULL) {
878 failf (data, "Failed to alloc scratch buffer!");
879 return CURLE_OUT_OF_MEMORY;
880 }
881 /*
882 * ASCII/EBCDIC Note: This is presumably a text (not binary)
883 * transfer so the data should already be in ASCII.
884 * That means the hex values for ASCII CR (0x0d) & LF (0x0a)
885 * must be used instead of the escape sequences \r & \n.
886 */
887 for(i = 0, si = 0; i < nread; i++, si++) {
888 if(data->req.upload_fromhere[i] == 0x0a) {
889 data->state.scratch[si++] = 0x0d;
890 data->state.scratch[si] = 0x0a;
891 if(!data->set.crlf) {
892 /* we're here only because FTP is in ASCII mode...
893 bump infilesize for the LF we just added */
894 data->set.infilesize++;
895 }
896 }
897 else
898 data->state.scratch[si] = data->req.upload_fromhere[i];
899 }
900 if(si != nread) {
901 /* only perform the special operation if we really did replace
902 anything */
903 nread = si;
904
905 /* upload from the new (replaced) buffer instead */
906 data->req.upload_fromhere = data->state.scratch;
907
908 /* set the new amount too */
909 data->req.upload_present = nread;
910 }
911 }
912 } /* if 0 == data->req.upload_present */
913 else {
914 /* We have a partial buffer left from a previous "round". Use
915 that instead of reading more data */
916 }
917
918 /* write to socket (send away data) */
919 result = Curl_write(conn,
920 conn->writesockfd, /* socket to send to */
921 data->req.upload_fromhere, /* buffer pointer */
922 data->req.upload_present, /* buffer size */
923 &bytes_written); /* actually sent */
924
925 if(result)
926 return result;
927
928 if(data->set.verbose)
929 /* show the data before we change the pointer upload_fromhere */
930 Curl_debug(data, CURLINFO_DATA_OUT, data->req.upload_fromhere,
931 (size_t)bytes_written, conn);
932
933 if(data->req.upload_present != bytes_written) {
934 /* we only wrote a part of the buffer (if anything), deal with it! */
935
936 /* store the amount of bytes left in the buffer to write */
937 data->req.upload_present -= bytes_written;
938
939 /* advance the pointer where to find the buffer when the next send
940 is to happen */
941 data->req.upload_fromhere += bytes_written;
942 }
943 else {
944 /* we've uploaded that buffer now */
945 data->req.upload_fromhere = k->uploadbuf;
946 data->req.upload_present = 0; /* no more bytes left */
947
948 if(k->upload_done) {
949 /* switch off writing, we're done! */
950 k->keepon &= ~KEEP_SEND; /* we're done writing */
951 }
952 }
953
954 k->writebytecount += bytes_written;
955 Curl_pgrsSetUploadCounter(data, k->writebytecount);
956
957 } while(0); /* just to break out from! */
958
959 return CURLE_OK;
960}
961
962/*
963 * Curl_readwrite() is the low-level function to be called when data is to
964 * be read and written to/from the connection.
965 */
966CURLcode Curl_readwrite(struct connectdata *conn,
967 bool *done)
968{
969 struct SessionHandle *data = conn->data;
970 struct SingleRequest *k = &data->req;
971 CURLcode result;
972 int didwhat=0;
973
974 curl_socket_t fd_read;
975 curl_socket_t fd_write;
976 int select_res = conn->cselect_bits;
977
978 conn->cselect_bits = 0;
979
980 /* only use the proper socket if the *_HOLD bit is not set simultaneously as
981 then we are in rate limiting state in that transfer direction */
982
983 if((k->keepon & KEEP_RECVBITS) == KEEP_RECV)
984 fd_read = conn->sockfd;
985 else
986 fd_read = CURL_SOCKET_BAD;
987
988 if((k->keepon & KEEP_SENDBITS) == KEEP_SEND)
989 fd_write = conn->writesockfd;
990 else
991 fd_write = CURL_SOCKET_BAD;
992
993 if(!select_res) /* Call for select()/poll() only, if read/write/error
994 status is not known. */
995 select_res = Curl_socket_ready(fd_read, fd_write, 0);
996
997 if(select_res == CURL_CSELECT_ERR) {
998 failf(data, "select/poll returned error");
999 return CURLE_SEND_ERROR;
1000 }
1001
1002 /* We go ahead and do a read if we have a readable socket or if
1003 the stream was rewound (in which case we have data in a
1004 buffer) */
1005 if((k->keepon & KEEP_RECV) &&
1006 ((select_res & CURL_CSELECT_IN) || conn->bits.stream_was_rewound)) {
1007
1008 result = readwrite_data(data, conn, k, &didwhat, done);
1009 if(result || *done)
1010 return result;
1011 }
1012
1013 /* If we still have writing to do, we check if we have a writable socket. */
1014 if((k->keepon & KEEP_SEND) && (select_res & CURL_CSELECT_OUT)) {
1015 /* write */
1016
1017 result = readwrite_upload(data, conn, k, &didwhat);
1018 if(result)
1019 return result;
1020 }
1021
1022 k->now = Curl_tvnow();
1023 if(didwhat) {
1024 /* Update read/write counters */
1025 if(k->bytecountp)
1026 *k->bytecountp = k->bytecount; /* read count */
1027 if(k->writebytecountp)
1028 *k->writebytecountp = k->writebytecount; /* write count */
1029 }
1030 else {
1031 /* no read no write, this is a timeout? */
1032 if(k->exp100 == EXP100_AWAITING_CONTINUE) {
1033 /* This should allow some time for the header to arrive, but only a
1034 very short time as otherwise it'll be too much wasted time too
1035 often. */
1036
1037 /* Quoting RFC2616, section "8.2.3 Use of the 100 (Continue) Status":
1038
1039 Therefore, when a client sends this header field to an origin server
1040 (possibly via a proxy) from which it has never seen a 100 (Continue)
1041 status, the client SHOULD NOT wait for an indefinite period before
1042 sending the request body.
1043
1044 */
1045
1046 long ms = Curl_tvdiff(k->now, k->start100);
1047 if(ms > CURL_TIMEOUT_EXPECT_100) {
1048 /* we've waited long enough, continue anyway */
1049 k->exp100 = EXP100_SEND_DATA;
1050 k->keepon |= KEEP_SEND;
1051 infof(data, "Done waiting for 100-continue\n");
1052 }
1053 }
1054 }
1055
1056 if(Curl_pgrsUpdate(conn))
1057 result = CURLE_ABORTED_BY_CALLBACK;
1058 else
1059 result = Curl_speedcheck(data, k->now);
1060 if(result)
1061 return result;
1062
1063 if(k->keepon) {
1064 if(data->set.timeout &&
1065 (Curl_tvdiff(k->now, k->start) >= data->set.timeout)) {
1066 if(k->size != -1) {
1067 failf(data, "Operation timed out after %ld milliseconds with %"
1068 FORMAT_OFF_T " out of %" FORMAT_OFF_T " bytes received",
1069 Curl_tvdiff(k->now, k->start), k->bytecount, k->size);
1070 } else {
1071 failf(data, "Operation timed out after %ld milliseconds with %"
1072 FORMAT_OFF_T " bytes received",
1073 Curl_tvdiff(k->now, k->start), k->bytecount);
1074 }
1075 return CURLE_OPERATION_TIMEDOUT;
1076 }
1077 }
1078 else {
1079 /*
1080 * The transfer has been performed. Just make some general checks before
1081 * returning.
1082 */
1083
1084 if(!(data->set.opt_no_body) && (k->size != -1) &&
1085 (k->bytecount != k->size) &&
1086#ifdef CURL_DO_LINEEND_CONV
1087 /* Most FTP servers don't adjust their file SIZE response for CRLFs,
1088 so we'll check to see if the discrepancy can be explained
1089 by the number of CRLFs we've changed to LFs.
1090 */
1091 (k->bytecount != (k->size + data->state.crlf_conversions)) &&
1092#endif /* CURL_DO_LINEEND_CONV */
1093 !data->req.newurl) {
1094 failf(data, "transfer closed with %" FORMAT_OFF_T
1095 " bytes remaining to read",
1096 k->size - k->bytecount);
1097 return CURLE_PARTIAL_FILE;
1098 }
1099 else if(!(data->set.opt_no_body) &&
1100 k->chunk &&
1101 (conn->chunk.state != CHUNK_STOP)) {
1102 /*
     * In chunked mode, return an error if the connection is closed before
     * the empty (terminating) chunk has been read.
1105 *
1106 * The condition above used to check for
1107 * conn->proto.http->chunk.datasize != 0 which is true after reading
1108 * *any* chunk, not just the empty chunk.
1109 *
1110 */
1111 failf(data, "transfer closed with outstanding read data remaining");
1112 return CURLE_PARTIAL_FILE;
1113 }
1114 if(Curl_pgrsUpdate(conn))
1115 return CURLE_ABORTED_BY_CALLBACK;
1116 }
1117
1118 /* Now update the "done" boolean we return */
1119 *done = (bool)(0 == (k->keepon&(KEEP_RECV|KEEP_SEND|
1120 KEEP_RECV_PAUSE|KEEP_SEND_PAUSE)));
1121
1122 return CURLE_OK;
1123}
1124
1125/*
1126 * Curl_single_getsock() gets called by the multi interface code when the app
1127 * has requested to get the sockets for the current connection. This function
1128 * will then be called once for every connection that the multi interface
1129 * keeps track of. This function will only be called for connections that are
1130 * in the proper state to have this information available.
1131 */
int Curl_single_getsock(const struct connectdata *conn,
                        curl_socket_t *sock, /* points to numsocks number
                                                of sockets */
                        int numsocks)
{
  const struct SessionHandle *data = conn->data;
  int bitmap = GETSOCK_BLANK;
  unsigned sockindex = 0;

  /* a protocol handler may report its own socket set; if so, defer to it */
  if(conn->handler->perform_getsock)
    return conn->handler->perform_getsock(conn, sock, numsocks);

  if(numsocks < 2)
    /* simple check but we might need two slots */
    return GETSOCK_BLANK;

  /* don't include HOLD and PAUSE connections */
  if((data->req.keepon & KEEP_RECVBITS) == KEEP_RECV) {

    DEBUGASSERT(conn->sockfd != CURL_SOCKET_BAD);

    bitmap |= GETSOCK_READSOCK(sockindex);
    sock[sockindex] = conn->sockfd;
  }

  /* don't include HOLD and PAUSE connections */
  if((data->req.keepon & KEEP_SENDBITS) == KEEP_SEND) {

    if((conn->sockfd != conn->writesockfd) ||
       !(data->req.keepon & KEEP_RECV)) {
      /* only if they are not the same socket or we didn't have a readable
         one, we increase index */
      if(data->req.keepon & KEEP_RECV)
        sockindex++; /* increase index if we need two entries */

      DEBUGASSERT(conn->writesockfd != CURL_SOCKET_BAD);

      sock[sockindex] = conn->writesockfd;
    }
    /* when read and write use the same descriptor and both are wanted, the
       write bit refers to the slot already filled in above */
    bitmap |= GETSOCK_WRITESOCK(sockindex);
  }

  return bitmap;
}
1177
1178/*
1179 * Determine optimum sleep time based on configured rate, current rate,
1180 * and packet size.
1181 * Returns value in mili-seconds.
1182 *
1183 * The basic idea is to adjust the desired rate up/down in this method
1184 * based on whether we are running too slow or too fast. Then, calculate
1185 * how many miliseconds to wait for the next packet to achieve this new
1186 * rate.
1187 */
1188long Curl_sleep_time(curl_off_t rate_bps, curl_off_t cur_rate_bps,
1189 int pkt_size)
1190{
1191 curl_off_t min_sleep = 0;
1192 curl_off_t rv = 0;
1193
1194 if (rate_bps == 0)
1195 return 0;
1196
1197 /* If running faster than about .1% of the desired speed, slow
1198 * us down a bit. Use shift instead of division as the 0.1%
1199 * cutoff is arbitrary anyway.
1200 */
1201 if (cur_rate_bps > (rate_bps + (rate_bps >> 10))) {
1202 /* running too fast, decrease target rate by 1/64th of rate */
1203 rate_bps -= rate_bps >> 6;
1204 min_sleep = 1;
1205 }
1206 else if (cur_rate_bps < (rate_bps - (rate_bps >> 10))) {
1207 /* running too slow, increase target rate by 1/64th of rate */
1208 rate_bps += rate_bps >> 6;
1209 }
1210
1211 /* Determine number of miliseconds to wait until we do
1212 * the next packet at the adjusted rate. We should wait
1213 * longer when using larger packets, for instance.
1214 */
1215 rv = ((curl_off_t)((pkt_size * 8) * 1000) / rate_bps);
1216
1217 /* Catch rounding errors and always slow down at least 1ms if
1218 * we are running too fast.
1219 */
1220 if (rv < min_sleep)
1221 rv = min_sleep;
1222
1223 /* Bound value to fit in 'long' on 32-bit platform. That's
1224 * plenty long enough anyway!
1225 */
1226 if(rv > 0x7fffffff)
1227 rv = 0x7fffffff;
1228
1229 return (long)rv;
1230}
1231
1232
1233/*
1234 * Transfer()
1235 *
1236 * This function is what performs the actual transfer. It is capable of doing
1237 * both ways simultaneously. The transfer must already have been setup by a
1238 * call to Curl_setup_transfer().
1239 *
1240 * Note that headers are created in a preallocated buffer of a default size.
1241 * That buffer can be enlarged on demand, but it is never shrunken again.
1242 *
1243 */
1244
static CURLcode
Transfer(struct connectdata *conn)
{
  CURLcode result;
  struct SessionHandle *data = conn->data;
  struct SingleRequest *k = &data->req;
  bool done=FALSE;
  bool first=TRUE;
  int timeout_ms;
  int buffersize;
  int totmp;

  if((conn->sockfd == CURL_SOCKET_BAD) &&
     (conn->writesockfd == CURL_SOCKET_BAD))
    /* nothing to read, nothing to write, we're already OK! */
    return CURLE_OK;

  /* we want header and/or body, if neither then don't do this! */
  if(!k->getheader && data->set.opt_no_body)
    return CURLE_OK;

  /* main transfer loop: wait for socket readiness (or timeout), then let
     Curl_readwrite() move data, until it reports the transfer as done */
  while(!done) {
    curl_socket_t fd_read = conn->sockfd;
    curl_socket_t fd_write = conn->writesockfd;
    int keepon = k->keepon;
    timeout_ms = 1000;

    if(conn->waitfor) {
      /* if waitfor is set, get the RECV and SEND bits from that but keep the
         other bits */
      keepon &= ~ (KEEP_RECV|KEEP_SEND);
      keepon |= conn->waitfor & (KEEP_RECV|KEEP_SEND);
    }

    /* limit-rate logic: if speed exceeds threshold, then do not include fd in
       select set. The current speed is recalculated in each Curl_readwrite()
       call */
    if((keepon & KEEP_SEND) &&
       (!data->set.max_send_speed ||
        (data->progress.ulspeed < data->set.max_send_speed) )) {
      k->keepon &= ~KEEP_SEND_HOLD;
    }
    else {
      if (data->set.upload && data->set.max_send_speed &&
          (data->progress.ulspeed > data->set.max_send_speed) ) {
        /* calculate upload rate-limitation timeout. */
        buffersize = (int)(data->set.buffer_size ?
                           data->set.buffer_size : BUFSIZE);
        totmp = (int)Curl_sleep_time(data->set.max_send_speed,
                                     data->progress.ulspeed, buffersize);
        if (totmp < timeout_ms)
          timeout_ms = totmp;
      }
      fd_write = CURL_SOCKET_BAD;
      if(keepon & KEEP_SEND)
        k->keepon |= KEEP_SEND_HOLD; /* hold it */
    }

    if((keepon & KEEP_RECV) &&
       (!data->set.max_recv_speed ||
        (data->progress.dlspeed < data->set.max_recv_speed)) ) {
      k->keepon &= ~KEEP_RECV_HOLD;
    }
    else {
      if ((!data->set.upload) && data->set.max_recv_speed &&
          (data->progress.dlspeed > data->set.max_recv_speed)) {
        /* Calculate download rate-limitation timeout. */
        buffersize = (int)(data->set.buffer_size ?
                           data->set.buffer_size : BUFSIZE);
        totmp = (int)Curl_sleep_time(data->set.max_recv_speed,
                                     data->progress.dlspeed, buffersize);
        if (totmp < timeout_ms)
          timeout_ms = totmp;
      }
      fd_read = CURL_SOCKET_BAD;
      if(keepon & KEEP_RECV)
        k->keepon |= KEEP_RECV_HOLD; /* hold it */
    }

    /* pause logic. Don't check descriptors for paused connections */
    if(k->keepon & KEEP_RECV_PAUSE)
      fd_read = CURL_SOCKET_BAD;
    if(k->keepon & KEEP_SEND_PAUSE)
      fd_write = CURL_SOCKET_BAD;

    /* The *_HOLD and *_PAUSE logic is necessary since even though there might
       be no traffic during the select interval, we still call
       Curl_readwrite() for the timeout case and if we limit transfer speed we
       must make sure that this function doesn't transfer anything while in
       HOLD status.

       The no timeout for the first round is for the protocols for which data
       has already been slurped off the socket and thus waiting for action
       won't work since it'll wait even though there is already data present
       to work with. */
    if(first &&
       ((fd_read != CURL_SOCKET_BAD) || (fd_write != CURL_SOCKET_BAD)))
      /* if this is the first lap and one of the file descriptors is fine
         to work with, skip the timeout */
      timeout_ms = 0;
    else {
      if(data->set.timeout) {
        /* shrink the wait to whatever remains of the global timeout */
        totmp = (int)(data->set.timeout - Curl_tvdiff(k->now, k->start));
        if(totmp < 0)
          return CURLE_OPERATION_TIMEDOUT;
      }
      else
        totmp = 1000;

      if (totmp < timeout_ms)
        timeout_ms = totmp;
    }

    switch (Curl_socket_ready(fd_read, fd_write, timeout_ms)) {
    case -1: /* select() error, stop reading */
#ifdef EINTR
      /* The EINTR is not serious, and it seems you might get this more
         often when using the lib in a multi-threaded environment! */
      if(SOCKERRNO == EINTR)
        continue;
#endif
      return CURLE_RECV_ERROR; /* indicate a network problem */
    case 0: /* timeout */
    default: /* readable descriptors */

      result = Curl_readwrite(conn, &done);
      /* "done" signals to us if the transfer(s) are ready */
      break;
    }
    if(result)
      return result;

    first = FALSE; /* not the first lap anymore */
  }

  return CURLE_OK;
}
1382
1383/*
1384 * Curl_pretransfer() is called immediately before a transfer starts.
1385 */
1386CURLcode Curl_pretransfer(struct SessionHandle *data)
1387{
1388 CURLcode res;
1389 if(!data->change.url) {
1390 /* we can't do anything without URL */
1391 failf(data, "No URL set!");
1392 return CURLE_URL_MALFORMAT;
1393 }
1394
1395 /* Init the SSL session ID cache here. We do it here since we want to do it
1396 after the *_setopt() calls (that could change the size of the cache) but
1397 before any transfer takes place. */
1398 res = Curl_ssl_initsessions(data, data->set.ssl.numsessions);
1399 if(res)
1400 return res;
1401
1402 data->set.followlocation=0; /* reset the location-follow counter */
1403 data->state.this_is_a_follow = FALSE; /* reset this */
1404 data->state.errorbuf = FALSE; /* no error has occurred */
1405 data->state.httpversion = 0; /* don't assume any particular server version */
1406
1407 data->state.ssl_connect_retry = FALSE;
1408
1409 data->state.authproblem = FALSE;
1410 data->state.authhost.want = data->set.httpauth;
1411 data->state.authproxy.want = data->set.proxyauth;
1412 Curl_safefree(data->info.wouldredirect);
1413 data->info.wouldredirect = NULL;
1414
1415 /* If there is a list of cookie files to read, do it now! */
1416 if(data->change.cookielist) {
1417 Curl_cookie_loadfiles(data);
1418 }
1419
1420 /* Allow data->set.use_port to set which port to use. This needs to be
1421 * disabled for example when we follow Location: headers to URLs using
1422 * different ports! */
1423 data->state.allow_port = TRUE;
1424
1425#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1426 /*************************************************************
1427 * Tell signal handler to ignore SIGPIPE
1428 *************************************************************/
1429 if(!data->set.no_signal)
1430 data->state.prev_signal = signal(SIGPIPE, SIG_IGN);
1431#endif
1432
1433 Curl_initinfo(data); /* reset session-specific information "variables" */
1434 Curl_pgrsStartNow(data);
1435
1436 return CURLE_OK;
1437}
1438
1439/*
1440 * Curl_posttransfer() is called immediately after a transfer ends
1441 */
1442CURLcode Curl_posttransfer(struct SessionHandle *data)
1443{
1444#if defined(HAVE_SIGNAL) && defined(SIGPIPE) && !defined(HAVE_MSG_NOSIGNAL)
1445 /* restore the signal handler for SIGPIPE before we get back */
1446 if(!data->set.no_signal)
1447 signal(SIGPIPE, data->state.prev_signal);
1448#else
1449 (void)data; /* unused parameter */
1450#endif
1451
1452 if(!(data->progress.flags & PGRS_HIDE) &&
1453 !data->progress.callback)
1454 /* only output if we don't use a progress callback and we're not hidden */
1455 fprintf(data->set.err, "\n");
1456
1457 return CURLE_OK;
1458}
1459
1460#ifndef CURL_DISABLE_HTTP
1461/*
1462 * strlen_url() returns the length of the given URL if the spaces within the
1463 * URL were properly URL encoded.
1464 */
/*
 * strlen_url() returns the length the given URL would have if every space
 * in it were URL-encoded: "%20" (3 chars) left of the '?', "+" (1 char)
 * right of it. All other characters count as one.
 */
static size_t strlen_url(const char *url)
{
  size_t len = 0;
  int in_query = 0; /* nonzero once a '?' has been passed */
  const char *p;

  for(p = url; *p; p++) {
    if(*p == ' ')
      /* a space becomes "%20" before the query part, "+" within it */
      len += in_query ? 1 : 3;
    else {
      if(*p == '?')
        in_query = 1;
      len++;
    }
  }
  return len;
}
1489
1490/* strcpy_url() copies a url to a output buffer and URL-encodes the spaces in
1491 * the source URL accordingly.
1492 */
/* strcpy_url() copies a URL into the output buffer while URL-encoding its
 * spaces: "%20" left of the '?', "+" right of it. The caller must have
 * sized 'output' using strlen_url() (+1 for the terminator).
 */
static void strcpy_url(char *output, const char *url)
{
  int in_query = 0; /* nonzero once a '?' has been passed */
  const char *src;
  char *dst = output;

  for(src = url; *src; src++) {
    if(*src == ' ') {
      if(in_query)
        *dst++ = '+';       /* space in the query part */
      else {
        *dst++ = '%';       /* space in the path part: emit "%20" */
        *dst++ = '2';
        *dst++ = '0';
      }
    }
    else {
      if(*src == '?')
        in_query = 1;
      *dst++ = *src;
    }
  }
  *dst = 0; /* zero terminate output buffer */
}
1523
1524/*
1525 * Returns true if the given URL is absolute (as opposed to relative)
1526 */
1527static bool is_absolute_url(const char *url)
1528{
1529 char prot[16]; /* URL protocol string storage */
1530 char letter; /* used for a silly sscanf */
1531
1532 return (bool)(2 == sscanf(url, "%15[^?&/:]://%c", prot, &letter));
1533}
1534
1535/*
1536 * Concatenate a relative URL to a base URL making it absolute.
1537 * URL-encodes any spaces.
1538 * The returned pointer must be freed by the caller unless NULL
1539 * (returns NULL on out of memory).
1540 */
static char *concat_url(const char *base, const char *relurl)
{
  /***
   TRY to append this new path to the old URL
   to the right of the host part. Oh crap, this is doomed to cause
   problems in the future...
  */
  char *newest;
  char *protsep;   /* start of the host name within url_clone */
  char *pathsep;   /* scratch pointer used for cutting the clone */
  size_t newlen;   /* length of the (space-encoded) relative part */

  const char *useurl = relurl;
  size_t urllen;   /* length of the truncated base part */

  /* we must make our own copy of the URL to play with, as it may
     point to read-only data */
  char *url_clone=strdup(base);

  if(!url_clone)
    return NULL; /* skip out of this NOW */

  /* protsep points to the start of the host name */
  protsep=strstr(url_clone, "//");
  if(!protsep)
    protsep=url_clone;
  else
    protsep+=2; /* pass the slashes */

  if('/' != relurl[0]) {
    /* relative path (or bare query): resolve against the base's directory */
    int level=0;

    /* First we need to find out if there's a ?-letter in the URL,
       and cut it and the right-side of that off */
    pathsep = strchr(protsep, '?');
    if(pathsep)
      *pathsep=0;

    /* we have a relative path to append to the last slash if there's one
       available, or if the new URL is just a query string (starts with a
       '?') we append the new one at the end of the entire currently worked
       out URL */
    if(useurl[0] != '?') {
      pathsep = strrchr(protsep, '/');
      if(pathsep)
        *pathsep=0;
    }

    /* Check if there's any slash after the host name, and if so, remember
       that position instead */
    pathsep = strchr(protsep, '/');
    if(pathsep)
      protsep = pathsep+1;
    else
      protsep = NULL;

    /* now deal with one "./" or any amount of "../" in the newurl
       and act accordingly */

    if((useurl[0] == '.') && (useurl[1] == '/'))
      useurl+=2; /* just skip the "./" */

    while((useurl[0] == '.') &&
          (useurl[1] == '.') &&
          (useurl[2] == '/')) {
      level++;
      useurl+=3; /* pass the "../" */
    }

    if(protsep) {
      while(level--) {
        /* cut off one more level from the right of the original URL */
        pathsep = strrchr(protsep, '/');
        if(pathsep)
          *pathsep=0;
        else {
          /* no more levels to strip; keep just the host part */
          *protsep=0;
          break;
        }
      }
    }
  }
  else {
    /* We got a new absolute path for this server, cut off from the
       first slash */
    pathsep = strchr(protsep, '/');
    if(pathsep) {
      /* When people use badly formatted URLs, such as
         "http://www.url.com?dir=/home/daniel" we must not use the first
         slash, if there's a ?-letter before it! */
      char *sep = strchr(protsep, '?');
      if(sep && (sep < pathsep))
        pathsep = sep;
      *pathsep=0;
    }
    else {
      /* There was no slash. Now, since we might be operating on a badly
         formatted URL, such as "http://www.url.com?id=2380" which doesn't
         use a slash separator as it is supposed to, we need to check for a
         ?-letter as well! */
      pathsep = strchr(protsep, '?');
      if(pathsep)
        *pathsep=0;
    }
  }

  /* If the new part contains a space, this is a mighty stupid redirect
     but we still make an effort to do "right". To the left of a '?'
     letter we replace each space with %20 while it is replaced with '+'
     on the right side of the '?' letter.
  */
  newlen = strlen_url(useurl);

  urllen = strlen(url_clone);

  newest = malloc( urllen + 1 + /* possible slash */
                   newlen + 1 /* zero byte */);

  if(!newest) {
    free(url_clone); /* don't leak this */
    return NULL;
  }

  /* copy over the root url part */
  memcpy(newest, url_clone, urllen);

  /* check if we need to append a slash */
  if(('/' == useurl[0]) || (protsep && !*protsep) || ('?' == useurl[0]))
    ;
  else
    newest[urllen++]='/';

  /* then append the new piece on the right side */
  strcpy_url(&newest[urllen], useurl);

  free(url_clone);

  return newest;
}
1680#endif /* CURL_DISABLE_HTTP */
1681
1682/*
1683 * Curl_follow() handles the URL redirect magic. Pass in the 'newurl' string
1684 * as given by the remote server and set up the new URL to request.
1685 */
CURLcode Curl_follow(struct SessionHandle *data,
                     char *newurl, /* this 'newurl' is the Location: string,
                                      and it must be malloc()ed before passed
                                      here */
                     followtype type) /* see transfer.h */
{
#ifdef CURL_DISABLE_HTTP
  (void)data;
  (void)newurl;
  (void)type;
  /* Location: following will not happen when HTTP is disabled */
  return CURLE_TOO_MANY_REDIRECTS;
#else

  /* Location: redirect */
  bool disallowport = FALSE;

  if(type == FOLLOW_REDIR) {
    /* a real (non-FAKE/RETRY) redirect; enforce the redirect limit */
    if((data->set.maxredirs != -1) &&
        (data->set.followlocation >= data->set.maxredirs)) {
      failf(data,"Maximum (%ld) redirects followed", data->set.maxredirs);
      return CURLE_TOO_MANY_REDIRECTS;
    }

    /* mark the next request as a followed location: */
    data->state.this_is_a_follow = TRUE;

    data->set.followlocation++; /* count location-followers */

    if(data->set.http_auto_referer) {
      /* We are asked to automatically set the previous URL as the referer
         when we get the next URL. We pick the ->url field, which may or may
         not be 100% correct */

      if(data->change.referer_alloc)
        /* If we already have an allocated referer, free this first */
        free(data->change.referer);

      data->change.referer = strdup(data->change.url);
      if (!data->change.referer) {
        data->change.referer_alloc = FALSE;
        return CURLE_OUT_OF_MEMORY;
      }
      data->change.referer_alloc = TRUE; /* yes, free this later */
    }
  }

  if(!is_absolute_url(newurl)) {
    /***
     *DANG* this is an RFC 2068 violation. The URL is supposed
     to be absolute and this doesn't seem to be that!
     */
    char *absolute = concat_url(data->change.url, newurl);
    if (!absolute)
      return CURLE_OUT_OF_MEMORY;
    free(newurl);
    newurl = absolute;
  }
  else {
    /* This is an absolute URL, don't allow the custom port number */
    disallowport = TRUE;

    if(strchr(newurl, ' ')) {
      /* This new URL contains at least one space, this is a mighty stupid
         redirect but we still make an effort to do "right". */
      char *newest;
      size_t newlen = strlen_url(newurl);

      newest = malloc(newlen+1); /* get memory for this */
      if (!newest)
        return CURLE_OUT_OF_MEMORY;
      strcpy_url(newest, newurl); /* create a space-free URL */

      free(newurl); /* that was no good */
      newurl = newest; /* use this instead now */
    }

  }

  if(type == FOLLOW_FAKE) {
    /* we're only figuring out the new url if we would've followed locations
       but now we're done so we can get out! */
    data->info.wouldredirect = newurl;
    return CURLE_OK;
  }

  if(disallowport)
    data->state.allow_port = FALSE;

  if(data->change.url_alloc)
    free(data->change.url);
  else
    data->change.url_alloc = TRUE; /* the URL is allocated */

  /* ownership of 'newurl' moves into data->change.url here */
  data->change.url = newurl;
  newurl = NULL; /* don't free! */

  infof(data, "Issue another request to this URL: '%s'\n", data->change.url);

  /*
   * We get here when the HTTP code is 300-399 (and 401). We need to perform
   * differently based on exactly what return code there was.
   *
   * News from 7.10.6: we can also get here on a 401 or 407, in case we act on
   * a HTTP (proxy-) authentication scheme other than Basic.
   */
  switch(data->info.httpcode) {
    /* 401 - Act on a WWW-Authenticate, we keep on moving and do the
       Authorization: XXXX header in the HTTP request code snippet */
    /* 407 - Act on a Proxy-Authenticate, we keep on moving and do the
       Proxy-Authorization: XXXX header in the HTTP request code snippet */
    /* 300 - Multiple Choices */
    /* 306 - Not used */
    /* 307 - Temporary Redirect */
  default: /* for all above (and the unknown ones) */
    /* Some codes are explicitly mentioned since I've checked RFC2616 and they
     * seem to be OK to POST to.
     */
    break;
  case 301: /* Moved Permanently */
    /* (quote from RFC2616, section 10.3.2):
     *
     * Note: When automatically redirecting a POST request after receiving a
     * 301 status code, some existing HTTP/1.0 user agents will erroneously
     * change it into a GET request.
     *
     * ----
     *
     * Warning: Because most important user agents do this obvious RFC2616
     * violation, many webservers expect this misbehavior. So these servers
     * often answer a POST request with an error page. To be sure that
     * libcurl gets the page that most user agents would get, libcurl has to
     * force GET.
     *
     * This behaviour can be overridden with CURLOPT_POSTREDIR.
     */
    if( (data->set.httpreq == HTTPREQ_POST
         || data->set.httpreq == HTTPREQ_POST_FORM)
        && !data->set.post301) {
      infof(data,
            "Violate RFC 2616/10.3.2 and switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;
  case 302: /* Found */
    /* (From 10.3.3)

    Note: RFC 1945 and RFC 2068 specify that the client is not allowed
    to change the method on the redirected request.  However, most
    existing user agent implementations treat 302 as if it were a 303
    response, performing a GET on the Location field-value regardless
    of the original request method. The status codes 303 and 307 have
    been added for servers that wish to make unambiguously clear which
    kind of reaction is expected of the client.

    (From 10.3.4)

    Note: Many pre-HTTP/1.1 user agents do not understand the 303
    status. When interoperability with such clients is a concern, the
    302 status code may be used instead, since most user agents react
    to a 302 response as described here for 303.

    This behaviour can be overridden with CURLOPT_POSTREDIR
    */
    if( (data->set.httpreq == HTTPREQ_POST
         || data->set.httpreq == HTTPREQ_POST_FORM)
        && !data->set.post302) {
      infof(data,
            "Violate RFC 2616/10.3.3 and switch from POST to GET\n");
      data->set.httpreq = HTTPREQ_GET;
    }
    break;

  case 303: /* See Other */
    /* Disable both types of POSTs, since doing a second POST when
     * following isn't what anyone would want! */
    if(data->set.httpreq != HTTPREQ_GET) {
      data->set.httpreq = HTTPREQ_GET; /* enforce GET request */
      infof(data, "Disables POST, goes with %s\n",
            data->set.opt_no_body?"HEAD":"GET");
    }
    break;
  case 304: /* Not Modified */
    /* 304 means we did a conditional request and it was "Not modified".
     * We shouldn't get any Location: header in this response!
     */
    break;
  case 305: /* Use Proxy */
    /* (quote from RFC2616, section 10.3.6):
     * "The requested resource MUST be accessed through the proxy given
     * by the Location field. The Location field gives the URI of the
     * proxy.  The recipient is expected to repeat this single request
     * via the proxy. 305 responses MUST only be generated by origin
     * servers."
     */
    break;
  }
  Curl_pgrsTime(data, TIMER_REDIRECT);
  Curl_pgrsResetTimes(data);

  return CURLE_OK;
#endif /* CURL_DISABLE_HTTP */
}
1889
1890static CURLcode
1891connect_host(struct SessionHandle *data,
1892 struct connectdata **conn)
1893{
1894 CURLcode res = CURLE_OK;
1895
1896 bool async;
1897 bool protocol_done=TRUE; /* will be TRUE always since this is only used
1898 within the easy interface */
1899 Curl_pgrsTime(data, TIMER_STARTSINGLE);
1900 res = Curl_connect(data, conn, &async, &protocol_done);
1901
1902 if((CURLE_OK == res) && async) {
1903 /* Now, if async is TRUE here, we need to wait for the name
1904 to resolve */
1905 res = Curl_wait_for_resolv(*conn, NULL);
1906 if(CURLE_OK == res)
1907 /* Resolved, continue with the connection */
1908 res = Curl_async_resolved(*conn, &protocol_done);
1909 else
1910 /* if we can't resolve, we kill this "connection" now */
1911 (void)Curl_disconnect(*conn);
1912 }
1913
1914 return res;
1915}
1916
CURLcode
Curl_reconnect_request(struct connectdata **connp)
{
  CURLcode result = CURLE_OK;
  struct connectdata *conn = *connp;
  struct SessionHandle *data = conn->data;

  /* This was a re-use of a connection and we got a write error in the
   * DO-phase. Then we DISCONNECT this connection and have another attempt to
   * CONNECT and then DO again! The retry cannot possibly find another
   * connection to re-use, since we only keep one possible connection for
   * each.  */

  infof(data, "Re-used connection seems dead, get a new one\n");

  conn->bits.close = TRUE; /* enforce close of this connection */
  result = Curl_done(&conn, result, FALSE); /* we are so done with this */

  /* conn may no longer be a good pointer */

  /*
   * According to bug report #1330310. We need to check for CURLE_SEND_ERROR
   * here as well. I figure this could happen when the request failed on a FTP
   * connection and thus Curl_done() itself tried to use the connection
   * (again). Slight Lack of feedback in the report, but I don't think this
   * extra check can do much harm.
   */
  if((CURLE_OK == result) || (CURLE_SEND_ERROR == result)) {
    bool async;
    bool protocol_done = TRUE;

    /* Now, redo the connect and get a new connection */
    result = Curl_connect(data, connp, &async, &protocol_done);
    if(CURLE_OK == result) {
      /* We have connected or sent away a name resolve query fine */

      conn = *connp; /* setup conn to again point to something nice */
      if(async) {
        /* Now, if async is TRUE here, we need to wait for the name
           to resolve */
        result = Curl_wait_for_resolv(conn, NULL);
        if(result)
          return result;

        /* Resolved, continue with the connection */
        result = Curl_async_resolved(conn, &protocol_done);
        if(result)
          return result;
      }
    }
  }

  return result;
}
1971
1972/* Returns CURLE_OK *and* sets '*url' if a request retry is wanted.
1973
1974 NOTE: that the *url is malloc()ed. */
CURLcode Curl_retry_request(struct connectdata *conn,
                            char **url)
{
  struct SessionHandle *data = conn->data;

  *url = NULL;

  /* if we're talking upload, we can't do the checks below, unless the protocol
     is HTTP as when uploading over HTTP we will still get a response */
  if(data->set.upload && !(conn->protocol&(PROT_HTTP|PROT_RTSP)))
    return CURLE_OK;

  /* retry when a broken-TLS-server workaround is flagged, or when a re-used
     connection produced no data at all for a request that expected a body */
  if(/* workaround for broken TLS servers */ data->state.ssl_connect_retry ||
     ((data->req.bytecount +
       data->req.headerbytecount == 0) &&
      conn->bits.reuse &&
      !data->set.opt_no_body &&
      data->set.rtspreq != RTSPREQ_RECEIVE)) {
    /* We got no data, we attempted to re-use a connection and yet we want a
       "body". This might happen if the connection was left alive when we were
       done using it before, but that was closed when we wanted to read from
       it again. Bad luck. Retry the same request on a fresh connect! */
    infof(conn->data, "Connection died, retrying a fresh connect\n");
    *url = strdup(conn->data->change.url);
    if(!*url)
      return CURLE_OUT_OF_MEMORY;

    conn->bits.close = TRUE; /* close this connection */
    conn->bits.retry = TRUE; /* mark this as a connection we're about
                                to retry. Marking it this way should
                                prevent i.e HTTP transfers to return
                                error just because nothing has been
                                transferred! */
  }
  return CURLE_OK;
}
2011
/*
 * Curl_perform() is the internal high-level function that gets called by the
 * external curl_easy_perform() function. It inits, performs and cleans up a
 * single file transfer.
 *
 * Returns the first error encountered, or CURLE_OK. Post-transfer cleanup
 * (Curl_posttransfer) always runs, but never clobbers an earlier error code.
 */
CURLcode Curl_perform(struct SessionHandle *data)
{
  CURLcode res;
  CURLcode res2;                /* secondary code from cleanup calls; only
                                   reported if 'res' is still CURLE_OK */
  struct connectdata *conn=NULL;
  char *newurl = NULL; /* possibly a new URL to follow to! */
  followtype follow = FOLLOW_NONE;

  /* mark this handle as driven by the easy interface (not multi) */
  data->state.used_interface = Curl_if_easy;

  res = Curl_pretransfer(data);
  if(res)
    return res;

  /*
   * It is important that there is NO 'return' from this function at any other
   * place than falling down to the end of the function! This is because we
   * have cleanup stuff that must be done before we get back, and that is only
   * performed after this do-while loop.
   */

  /* each iteration performs one request; we loop again only for retries on a
     fresh connection or for Location: redirect follows */
  for(;;) {
    res = connect_host(data, &conn); /* primary connection */

    if(res == CURLE_OK) {
      bool do_done;
      if(data->set.connect_only) {
        /* keep connection open for application to use the socket */
        conn->bits.close = FALSE;
        res = Curl_done(&conn, CURLE_OK, FALSE);
        break;
      }
      res = Curl_do(&conn, &do_done);

      if(res == CURLE_OK) {
        res = Transfer(conn); /* now fetch that URL please */
        if((res == CURLE_OK) || (res == CURLE_RECV_ERROR)) {
          /* a CURLE_RECV_ERROR may be a dead re-used connection; ask
             Curl_retry_request whether to retry on a fresh connect */
          bool retry = FALSE;
          CURLcode rc = Curl_retry_request(conn, &newurl);
          if(rc)
            res = rc;
          else
            /* a non-NULL newurl means "retry this very URL" */
            retry = (bool)(newurl?TRUE:FALSE);

          if(retry) {
            res = CURLE_OK;
            follow = FOLLOW_RETRY;
            if (!newurl)
              res = CURLE_OUT_OF_MEMORY;
          }
          else if (res == CURLE_OK) {
            /*
             * We must duplicate the new URL here as the connection data may
             * be free()ed in the Curl_done() function. We prefer the newurl
             * one since that's used for redirects or just further requests
             * for retries or multi-stage HTTP auth methods etc.
             */
            if(data->req.newurl) {
              follow = FOLLOW_REDIR;
              newurl = strdup(data->req.newurl);
              if (!newurl)
                res = CURLE_OUT_OF_MEMORY;
            }
            else if(data->req.location) {
              /* a Location: header seen but not followed (FOLLOWLOCATION
                 off); record it without performing another request */
              follow = FOLLOW_FAKE;
              newurl = strdup(data->req.location);
              if (!newurl)
                res = CURLE_OUT_OF_MEMORY;
            }
          }

          /* in the above cases where 'newurl' gets assigned, we have a fresh
           * allocated memory pointed to */
        }
        if(res != CURLE_OK) {
          /* The transfer phase returned error, we mark the connection to get
           * closed to prevent being re-used. This is because we can't
           * possibly know if the connection is in a good shape or not now. */
          conn->bits.close = TRUE;

          if(CURL_SOCKET_BAD != conn->sock[SECONDARYSOCKET]) {
            /* if we failed anywhere, we must clean up the secondary socket if
               it was used */
            sclose(conn->sock[SECONDARYSOCKET]);
            conn->sock[SECONDARYSOCKET] = CURL_SOCKET_BAD;
          }
        }

        /* Always run Curl_done(), even if some of the previous calls
           failed, but return the previous (original) error code */
        res2 = Curl_done(&conn, res, FALSE);

        if(CURLE_OK == res)
          res = res2;
      }
      else if(conn)
        /* Curl_do() failed, clean up left-overs in the done-call, but note
           that at some cases the conn pointer is NULL when Curl_do() failed
           and the connection cache is very small so only call Curl_done() if
           conn is still "alive".
        */
        res2 = Curl_done(&conn, res, FALSE);

      /*
       * Important: 'conn' cannot be used here, since it may have been closed
       * in 'Curl_done' or other functions.
       */

      if((res == CURLE_OK) && follow) {
        res = Curl_follow(data, newurl, follow);
        if(CURLE_OK == res) {
          /* if things went fine, Curl_follow() freed or otherwise took
             responsibility for the newurl pointer */
          newurl = NULL;
          /* FOLLOW_RETRY and FOLLOW_REDIR require a new request round;
             FOLLOW_FAKE (and below) does not */
          if(follow >= FOLLOW_RETRY) {
            follow = FOLLOW_NONE;
            continue;
          }
          /* else we break out of the loop below */
        }
      }
    }
    break; /* it only reaches here when this shouldn't loop */

  } /* loop if Location: */

  /* free a leftover duplicate if Curl_follow() never took it */
  if(newurl)
    free(newurl);

  if(res && !data->state.errorbuf) {
    /*
     * As an extra precaution: if no error string has been set and there was
     * an error, use the strerror() string or if things are so bad that not
     * even that is good, set a bad string that mentions the error code.
     */
    const char *str = curl_easy_strerror(res);
    if(!str)
      failf(data, "unspecified error %d", (int)res);
    else
      failf(data, "%s", str);
  }

  /* run post-transfer unconditionally, but don't clobber the return code if
     we already have an error code recorder */
  res2 = Curl_posttransfer(data);
  if(!res && res2)
    res = res2;

  return res;
}
2167
2168/*
2169 * Curl_setup_transfer() is called to setup some basic properties for the
2170 * upcoming transfer.
2171 */
2172CURLcode
2173Curl_setup_transfer(
2174 struct connectdata *conn, /* connection data */
2175 int sockindex, /* socket index to read from or -1 */
2176 curl_off_t size, /* -1 if unknown at this point */
2177 bool getheader, /* TRUE if header parsing is wanted */
2178 curl_off_t *bytecountp, /* return number of bytes read or NULL */
2179 int writesockindex, /* socket index to write to, it may very well be
2180 the same we read from. -1 disables */
2181 curl_off_t *writecountp /* return number of bytes written or NULL */
2182 )
2183{
2184 struct SessionHandle *data;
2185 struct SingleRequest *k;
2186
2187 DEBUGASSERT(conn != NULL);
2188
2189 data = conn->data;
2190 k = &data->req;
2191
2192 DEBUGASSERT((sockindex <= 1) && (sockindex >= -1));
2193
2194 /* now copy all input parameters */
2195 conn->sockfd = sockindex == -1 ?
2196 CURL_SOCKET_BAD : conn->sock[sockindex];
2197 conn->writesockfd = writesockindex == -1 ?
2198 CURL_SOCKET_BAD:conn->sock[writesockindex];
2199 k->getheader = getheader;
2200
2201 k->size = size;
2202 k->bytecountp = bytecountp;
2203 k->writebytecountp = writecountp;
2204
2205 /* The code sequence below is placed in this function just because all
2206 necessary input is not always known in do_complete() as this function may
2207 be called after that */
2208
2209 if(!k->getheader) {
2210 k->header = FALSE;
2211 if(size > 0)
2212 Curl_pgrsSetDownloadSize(data, size);
2213 }
2214 /* we want header and/or body, if neither then don't do this! */
2215 if(k->getheader || !data->set.opt_no_body) {
2216
2217 if(conn->sockfd != CURL_SOCKET_BAD) {
2218 k->keepon |= KEEP_RECV;
2219 }
2220
2221 if(conn->writesockfd != CURL_SOCKET_BAD) {
2222 /* HTTP 1.1 magic:
2223
2224 Even if we require a 100-return code before uploading data, we might
2225 need to write data before that since the REQUEST may not have been
2226 finished sent off just yet.
2227
2228 Thus, we must check if the request has been sent before we set the
2229 state info where we wait for the 100-return code
2230 */
2231 if((data->state.expect100header) &&
2232 (data->state.proto.http->sending == HTTPSEND_BODY)) {
2233 /* wait with write until we either got 100-continue or a timeout */
2234 k->exp100 = EXP100_AWAITING_CONTINUE;
2235 k->start100 = k->start;
2236 }
2237 else {
2238 if(data->state.expect100header)
2239 /* when we've sent off the rest of the headers, we must await a
2240 100-continue but first finish sending the request */
2241 k->exp100 = EXP100_SENDING_REQUEST;
2242
2243 /* enable the write bit when we're not waiting for continue */
2244 k->keepon |= KEEP_SEND;
2245 }
2246 } /* if(conn->writesockfd != CURL_SOCKET_BAD) */
2247 } /* if(k->getheader || !data->set.opt_no_body) */
2248
2249 return CURLE_OK;
2250}