-rw-r--r--  opentracker.c    9
-rw-r--r--  ot_http.c        6
-rw-r--r--  ot_mutex.c      15
3 files changed, 7 insertions, 23 deletions
diff --git a/opentracker.c b/opentracker.c
index 29c4ac7..212defc 100644
--- a/opentracker.c
+++ b/opentracker.c
@@ -217,11 +217,9 @@ static void handle_write( const int64 sock ) {
     chunked = 1;
 
   for( i = 0; i < cookie->batches; ++i ) {
-fprintf(stderr, "handle_write inspects batch %zu of %zu (bytes left: %llu)\n", i, cookie->batches, cookie->batch[i].bytesleft);
     if( cookie->batch[i].bytesleft ) {
       int64 res = iob_send( sock, cookie->batch + i );
 
-fprintf(stderr, "handle_write yields res %lld when trying to iob_send\n", res);
       if( res == -3 ) {
         handle_dead( sock );
         return;
@@ -237,13 +235,10 @@ static void handle_write( const int64 sock ) {
   }
 
   /* In a chunked transfer after all batches accumulated have been sent, wait for the next one */
-  if( chunked ) {
-fprintf( stderr, "handle_write is STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER => dont want write on sock %lld\n", sock);
+  if( chunked )
     io_dontwantwrite( sock );
-  } else {
-fprintf( stderr, "handle_write is STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER => handle dead on sock %lld\n", sock);
+  else
     handle_dead( sock );
-  }
 }
 
 static void handle_accept( const int64 serversocket ) {
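
For reference, this is roughly the shape handle_write() has once the debug output above is removed; a minimal sketch assuming the libowfat-style io_*/iob_* helpers opentracker uses, with the cookie lookup and partial-send handling elided:

static void handle_write( const int64 sock ) {
  struct http_data *cookie = io_getcookie( sock );
  int    chunked = 0;
  size_t i;

  /* ... bail out if there is no cookie, detect STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER ... */

  for( i = 0; i < cookie->batches; ++i ) {
    if( cookie->batch[i].bytesleft ) {
      int64 res = iob_send( sock, cookie->batch + i );

      if( res == -3 ) {        /* connection is gone, drop it */
        handle_dead( sock );
        return;
      }
      /* ... a partial send keeps bytesleft > 0, so we come back here later ... */
    }
  }

  /* In a chunked transfer, keep the socket open and wait for the next batch */
  if( chunked )
    io_dontwantwrite( sock );
  else
    handle_dead( sock );
}
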
diff --git a/ot_http.c b/ot_http.c
index 1d4497a..c5d553a 100644
--- a/ot_http.c
+++ b/ot_http.c
@@ -144,8 +144,6 @@ ssize_t http_sendiovecdata( const int64 sock, struct ot_workstruct *ws, int iove
     cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK;
   }
 
-fprintf(stderr, "http_sendiovecdata sending %d iovec entries found cookie->batch == %p\n", iovec_entries, cookie->batch);
-
   if( iovec_entries ) {
 
     if( cookie->flag & STRUCT_HTTP_FLAG_GZIP )
@@ -184,7 +182,6 @@ fprintf(stderr, "http_sendiovecdata sending %d iovec entries found cookie->batch
     for( i=0; i<iovec_entries; ++i ) {
       /* If the current batch's limit is reached, try to reallocate a new batch to work on */
       if( current->bytesleft > OT_BATCH_LIMIT ) {
-fprintf(stderr, "http_sendiovecdata found batch above limit: %llu\n", current->bytesleft);
         io_batch * new_batch = realloc( cookie->batch, (cookie->batches + 1) * sizeof(io_batch) );
         if( new_batch ) {
           cookie->batch = new_batch;
@@ -192,7 +189,6 @@ fprintf(stderr, "http_sendiovecdata found batch above limit: %llu\n", current->b
           iob_init_autofree(current ,0);
         }
       }
-fprintf(stderr, "http_sendiovecdata calling iob_addbuf_free with %zd\n", iovector[i].iov_len);
       iob_addbuf_free( current, iovector[i].iov_base, iovector[i].iov_len );
     }
     free( iovector );
@@ -201,7 +197,6 @@ fprintf(stderr, "http_sendiovecdata calling iob_addbuf_free with %zd\n", iovecto
   }
 
   if ((cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER) && cookie->batch && !is_partial) {
-fprintf(stderr, "http_sendiovecdata adds a terminating 0 size buffer to batch\n");
     current = cookie->batch + cookie->batches - 1;
     iob_addbuf(current, "0\r\n\r\n", 5);
     cookie->flag &= ~STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER;
@@ -210,7 +205,6 @@ fprintf(stderr, "http_sendiovecdata adds a terminating 0 size buffer to batch\n"
   /* writeable sockets timeout after 10 minutes */
   taia_now( &t ); taia_addsec( &t, &t, OT_CLIENT_TIMEOUT_SEND );
   io_timeout( sock, t );
-fprintf (stderr, "http_sendiovecdata marks socket %lld as wantwrite\n", sock);
   io_wantwrite( sock );
   return 0;
 }
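
The removed fprintf calls in http_sendiovecdata() were tracing the batching loop shown in the two middle hunks. Condensed, and assuming the batch counter is advanced right after a successful realloc (that line sits between the hunks and is not shown above), the loop works like this:

/* Append every iovec entry to the current io_batch; when a batch grows
 * past OT_BATCH_LIMIT, extend the batch array by one and continue there. */
for( i = 0; i < iovec_entries; ++i ) {
  if( current->bytesleft > OT_BATCH_LIMIT ) {
    io_batch *new_batch = realloc( cookie->batch, (cookie->batches + 1) * sizeof(io_batch) );
    if( new_batch ) {
      cookie->batch = new_batch;
      current = cookie->batch + cookie->batches++;  /* assumed: step to the freshly added batch */
      iob_init_autofree( current, 0 );
    }
  }
  iob_addbuf_free( current, iovector[i].iov_base, iovector[i].iov_len );
}
free( iovector );

/* A finished chunked reply gets the terminating zero-length HTTP chunk */
if( (cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER) && cookie->batch && !is_partial ) {
  current = cookie->batch + cookie->batches - 1;
  iob_addbuf( current, "0\r\n\r\n", 5 );
  cookie->flag &= ~STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER;
}
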
diff --git a/ot_mutex.c b/ot_mutex.c
index 15f3da5..174c4ca 100644
--- a/ot_mutex.c
+++ b/ot_mutex.c
@@ -205,14 +205,12 @@ int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec) {
   for (task = tasklist; task; task = task->next)
     if (task->taskid == taskid) {
       if( iovec ) {
-fprintf(stderr, "mutex_workqueue_pushchunked pushing on taskid %lu\n", taskid);
-        if (!iovec_append(&task->iovec_entries, &task->iovec, iovec) )
-          return -1;
-        task->tasktype = TASK_DONE_PARTIAL;
-      } else {
-fprintf(stderr, "mutex_workqueue_pushchunked finished taskid %lu\n", taskid);
+        if (iovec_append(&task->iovec_entries, &task->iovec, iovec) )
+          task->tasktype = TASK_DONE_PARTIAL;
+        else
+          task = NULL;
+      } else
         task->tasktype = TASK_DONE;
-      }
       break;
     }
 
@@ -220,8 +218,6 @@ fprintf(stderr, "mutex_workqueue_pushchunked finished taskid %lu\n", taskid);
   pthread_mutex_unlock( &tasklist_mutex );
 
   io_trywrite( g_self_pipe[1], &byte, 1 );
-if(!task)
-fprintf(stderr, "mutex_workqueue_pushchunked taskid %lu not found\n", taskid);
 
   /* Indicate whether the worker has to throw away results */
   return task ? 0 : -1;
@@ -240,7 +236,6 @@ int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec, int
   for (task = &tasklist; *task; task = &((*task)->next))
     if (((*task)->tasktype & TASK_CLASS_MASK ) == TASK_DONE) {
       struct ot_task *ptask = *task;
-fprintf(stderr, "Got task %lu type %d with %d entries\n", (*task)->taskid, (*task)->tasktype, ptask->iovec_entries);
       *iovec_entries = ptask->iovec_entries;
       *iovec = ptask->iovec;
       sock = ptask->sock;
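
The ot_mutex.c change is slightly more than dropping debug output: when iovec_append() fails, mutex_workqueue_pushchunked() no longer returns -1 while tasklist_mutex is still held; it clears the task pointer and lets the final return statement after the unlock report the failure. A condensed sketch of the resulting function, with the wake-up byte and declarations assumed from its sibling push helpers:

int mutex_workqueue_pushchunked( ot_taskid taskid, struct iovec *iovec ) {
  struct ot_task *task;
  const char byte = 'o';   /* assumed wake-up byte, as in the other push helpers */

  pthread_mutex_lock( &tasklist_mutex );
  for( task = tasklist; task; task = task->next )
    if( task->taskid == taskid ) {
      if( iovec ) {
        if( iovec_append( &task->iovec_entries, &task->iovec, iovec ) )
          task->tasktype = TASK_DONE_PARTIAL;   /* one more chunk queued for delivery */
        else
          task = NULL;                          /* append failed; report it after unlocking */
      } else
        task->tasktype = TASK_DONE;             /* no iovec marks the task as finished */
      break;
    }
  pthread_mutex_unlock( &tasklist_mutex );

  io_trywrite( g_self_pipe[1], &byte, 1 );      /* wake the main loop via the self pipe */

  /* Indicate whether the worker has to throw away results */
  return task ? 0 : -1;
}
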