path: root/ot_http.c
author    Dirk Engling <erdgeist@erdgeist.org>  2024-04-13 00:47:29 +0200
committer Dirk Engling <erdgeist@erdgeist.org>  2024-04-13 00:47:29 +0200
commit    1a70d9f9ef81ac1b5e843ac71f3538f7845e03ae (patch)
tree      20a20077503c01dc024e88a6a8d82bf89faf22fd /ot_http.c
parent    301faeb10c5994a6fd31adc5f0b4f8f2b5c23502 (diff)
First shot on chunked transfers
Diffstat (limited to 'ot_http.c')
 -rw-r--r--  ot_http.c | 93
 1 file changed, 56 insertions(+), 37 deletions(-)
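Note: the patch replaces the fixed Content-Length replies with chunked framing. The first delivery of a reply writes the status line plus a Transfer-Encoding: chunked header followed by the first chunk-size prefix, every delivery is framed as its size in hex, CRLF, the payload, CRLF, and the final (non-partial) delivery appends the zero-size chunk "0\r\n\r\n". The following stand-alone C sketch only illustrates that wire framing; it is not code from the patch, and the payload strings are made up.

#include <stdio.h>
#include <string.h>

/* Frame one piece of payload as a chunk: "<size in hex>\r\n<data>\r\n",
 * matching the "%zx\r\n" prefix and the trailing "\r\n" the patch adds. */
static void emit_chunk( const char *data, size_t len ) {
  printf( "%zx\r\n", len );        /* chunk size in hex */
  fwrite( data, 1, len, stdout );  /* chunk payload */
  printf( "\r\n" );                /* chunk terminator */
}

int main( void ) {
  const char *piece1 = "d5:filesd", *piece2 = "ee"; /* hypothetical payload pieces */

  printf( "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nTransfer-Encoding: chunked\r\n\r\n" );
  emit_chunk( piece1, strlen( piece1 ) );
  emit_chunk( piece2, strlen( piece2 ) );
  printf( "0\r\n\r\n" );           /* final zero-size chunk, as appended when !is_partial */
  return 0;
}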
diff --git a/ot_http.c b/ot_http.c
index 61843a8..edcfadb 100644
--- a/ot_http.c
+++ b/ot_http.c
@@ -121,9 +121,10 @@ ssize_t http_issue_error( const int64 sock, struct ot_workstruct *ws, int code )
   return ws->reply_size = -2;
 }
 
-ssize_t http_sendiovecdata( const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector ) {
+ssize_t http_sendiovecdata( const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial ) {
   struct http_data *cookie = io_getcookie( sock );
   char *header;
+  const char *encoding = "";
   int i;
   size_t header_size, size = iovec_length( &iovec_entries, (const struct iovec **)&iovector );
   tai6464 t;
@@ -140,54 +141,72 @@ ssize_t http_sendiovecdata( const int64 sock, struct ot_workstruct *ws, int iove
   /* If we came here, wait for the answer is over */
   cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK;
 
-  /* Our answers never are 0 vectors. Return an error. */
-  if( !iovec_entries ) {
-    HTTPERROR_500;
-  }
+fprintf(stderr, "http_sendiovecdata sending %d iovec entries found cookie->batch == %p\n", iovec_entries, cookie->batch);
 
-  /* Prepare space for http header */
-  header = malloc( SUCCESS_HTTP_HEADER_LENGTH + SUCCESS_HTTP_HEADER_LENGTH_CONTENT_ENCODING );
-  if( !header ) {
-    iovec_free( &iovec_entries, &iovector );
-    HTTPERROR_500;
-  }
+  if( iovec_entries ) {
 
-  if( cookie->flag & STRUCT_HTTP_FLAG_GZIP )
-    header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: gzip\r\nContent-Length: %zd\r\n\r\n", size );
-  else if( cookie->flag & STRUCT_HTTP_FLAG_BZIP2 )
-    header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: bzip2\r\nContent-Length: %zd\r\n\r\n", size );
-  else
-    header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r\n", size );
+    /* Prepare space for http header */
+    header = malloc( SUCCESS_HTTP_HEADER_LENGTH + SUCCESS_HTTP_HEADER_LENGTH_CONTENT_ENCODING );
+    if( !header ) {
+      iovec_free( &iovec_entries, &iovector );
+      HTTPERROR_500;
+    }
 
-  if (!cookie->batch ) {
-    cookie->batch = malloc( sizeof(io_batch) );
-    memset( cookie->batch, 0, sizeof(io_batch) );
-    cookie->batches = 1;
-  }
-  iob_addbuf_free( cookie->batch, header, header_size );
+    if( cookie->flag & STRUCT_HTTP_FLAG_GZIP )
+      encoding = "Content-Encoding: gzip\r\n";
+    else if( cookie->flag & STRUCT_HTTP_FLAG_BZIP2 )
+      encoding = "Content-Encoding: bzip2\r\n";
+
+    if( !(cookie->flag & STRUCT_HTTP_FLAG_CHUNKED) )
+      header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\n%sContent-Length: %zd\r\n\r\n", encoding, size );
+    else {
+      if ( !(cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER )) {
+        header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\n%sTransfer-Encoding: chunked\r\n\r\n%zx\r\n", encoding, size );
+        cookie->flag |= STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER;
+      } else
+        header_size = sprintf( header, "%zx\r\n", size );
+    }
 
-  /* Split huge iovectors into separate io_batches */
-  for( i=0; i<iovec_entries; ++i ) {
-    io_batch *current = cookie->batch + cookie->batches - 1;
+    if (!cookie->batch ) {
+      cookie->batch = malloc( sizeof(io_batch) );
+      memset( cookie->batch, 0, sizeof(io_batch) );
+      cookie->batches = 1;
+    }
+    iob_addbuf_free( cookie->batch, header, header_size );
 
-    /* If the current batch's limit is reached, try to reallocate a new batch to work on */
-    if( current->bytesleft > OT_BATCH_LIMIT ) {
-      io_batch * new_batch = realloc( current, (cookie->batches + 1) * sizeof(io_batch) );
-      if( new_batch ) {
-        cookie->batches++;
-        current = cookie->batch = new_batch;
-        memset( current, 0, sizeof(io_batch) );
-      }
-    }
+    /* Split huge iovectors into separate io_batches */
+    for( i=0; i<iovec_entries; ++i ) {
+      io_batch *current = cookie->batch + cookie->batches - 1;
 
-    iob_addbuf_free( current, iovector[i].iov_base, iovector[i].iov_len );
+      /* If the current batch's limit is reached, try to reallocate a new batch to work on */
+      if( current->bytesleft > OT_BATCH_LIMIT ) {
+fprintf(stderr, "http_sendiovecdata found batch above limit: %zd\n", current->bytesleft);
+        io_batch * new_batch = realloc( cookie->batch, (cookie->batches + 1) * sizeof(io_batch) );
+        if( new_batch ) {
+          cookie->batch = new_batch;
+          current = cookie->batch + cookie->batches++;
+          memset( current, 0, sizeof(io_batch) );
+        }
+      }
+fprintf(stderr, "http_sendiovecdata calling iob_addbuf_free with %zd\n", iovector[i].iov_len);
+      iob_addbuf_free( current, iovector[i].iov_base, iovector[i].iov_len );
+    }
+    free( iovector );
+    if ( cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER )
+      iob_addbuf(cookie->batch + cookie->batches - 1, "\r\n", 2);
+  }
+
+  if ((cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER) && cookie->batch && !is_partial) {
+fprintf(stderr, "http_sendiovecdata adds a terminating 0 size buffer to batch\n");
+    iob_addbuf(cookie->batch + cookie->batches - 1, "0\r\n\r\n", 5);
+    cookie->flag &= ~STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER;
   }
-  free( iovector );
 
   /* writeable sockets timeout after 10 minutes */
   taia_now( &t ); taia_addsec( &t, &t, OT_CLIENT_TIMEOUT_SEND );
   io_timeout( sock, t );
   io_dontwantread( sock );
+fprintf (stderr, "http_sendiovecdata marks socket %lld as wantwrite\n", sock);
   io_wantwrite( sock );
   return 0;
 }
@@ -254,7 +273,7 @@ static const ot_keywords keywords_format[] =
 #endif
 #endif
   /* Pass this task to the worker thread */
-  cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK;
+  cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED;
 
   /* Clients waiting for us should not easily timeout */
   taia_uint( &t, 0 ); io_timeout( sock, t );
@@ -278,7 +297,7 @@
 }
 
 #ifdef WANT_MODEST_FULLSCRAPES
 static pthread_mutex_t g_modest_fullscrape_mutex = PTHREAD_MUTEX_INITIALIZER;
 static ot_vector g_modest_fullscrape_timeouts;
 typedef struct { ot_ip6 ip; ot_time last_fullscrape; } ot_scrape_log;
 #endif
@@ -325,7 +344,7 @@ static ssize_t http_handle_fullscrape( const int64 sock, struct ot_workstruct *w
 #endif
 
   /* Pass this task to the worker thread */
-  cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK;
+  cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED;
   /* Clients waiting for us should not easily timeout */
   taia_uint( &t, 0 ); io_timeout( sock, t );
   fullscrape_deliver( sock, TASK_FULLSCRAPE | format );