author     Dirk Engling <erdgeist@erdgeist.org>    2024-04-15 00:39:02 +0200
committer  Dirk Engling <erdgeist@erdgeist.org>    2024-04-15 00:39:02 +0200
commit     7c633c259ebc4a863c5076462c5792ecb8b9f617 (patch)
tree       550a272a82325c554923c7498811cb8971aa49c2
parent     4c5935c0574481dc4b0e0bf57528dc3069e34742 (diff)
clang-format
-rw-r--r--  opentracker.c            829
-rw-r--r--  ot_accesslist.c          316
-rw-r--r--  ot_accesslist.h           47
-rw-r--r--  ot_clean.c               102
-rw-r--r--  ot_clean.h                10
-rw-r--r--  ot_fullscrape.c          287
-rw-r--r--  ot_fullscrape.h            8
-rw-r--r--  ot_http.c                744
-rw-r--r--  ot_http.h                  6
-rw-r--r--  ot_iovec.c                71
-rw-r--r--  ot_iovec.h                12
-rw-r--r--  ot_livesync.c            172
-rw-r--r--  ot_livesync.h              8
-rw-r--r--  ot_mutex.c               165
-rw-r--r--  ot_mutex.h               115
-rw-r--r--  ot_stats.c               971
-rw-r--r--  ot_stats.h                20
-rw-r--r--  ot_udp.c                 263
-rw-r--r--  ot_udp.h                   4
-rw-r--r--  ot_vector.c              236
-rw-r--r--  ot_vector.h               23
-rw-r--r--  scan_urlencoded_query.c   97
-rw-r--r--  scan_urlencoded_query.h    6
-rw-r--r--  trackerlogic.c           568
-rw-r--r--  trackerlogic.h           153
25 files changed, 2712 insertions, 2521 deletions
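
A tree-wide reformat like this is normally produced by running clang-format in place over every source file against a style definition kept in the repository root. The invocation and option values below are a hedged sketch inferred from the resulting layout (two-space indent, pointers bound to the variable name, a wide column limit); they are assumptions for illustration, not the project's actual configuration:

    # hypothetical invocation; the real file list and style file may differ
    clang-format -i *.c *.h

    # sketch of a .clang-format that would yield a similar layout (values assumed)
    BasedOnStyle: LLVM
    IndentWidth: 2
    ColumnLimit: 160
    PointerAlignment: Right
    AlignTrailingComments: true

Re-running the same command after later edits keeps the formatting stable, which is the usual point of a one-off "clang-format" commit like the diff that follows.
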
diff --git a/opentracker.c b/opentracker.c
index a896762..392f6df 100644
--- a/opentracker.c
+++ b/opentracker.c
@@ -5,59 +5,59 @@
5 $Id$ */ 5 $Id$ */
6 6
7/* System */ 7/* System */
8#include <stdlib.h>
9#include <string.h>
10#include <arpa/inet.h> 8#include <arpa/inet.h>
11#include <sys/socket.h> 9#include <ctype.h>
12#include <unistd.h>
13#include <errno.h> 10#include <errno.h>
11#include <pthread.h>
12#include <pwd.h>
14#include <signal.h> 13#include <signal.h>
15#include <stdio.h> 14#include <stdio.h>
16#include <pwd.h> 15#include <stdlib.h>
17#include <ctype.h> 16#include <string.h>
18#include <pthread.h> 17#include <sys/socket.h>
18#include <unistd.h>
19#ifdef WANT_SYSLOGS 19#ifdef WANT_SYSLOGS
20#include <syslog.h> 20#include <syslog.h>
21#endif 21#endif
22 22
23/* Libowfat */ 23/* Libowfat */
24#include "socket.h" 24#include "byte.h"
25#include "io.h" 25#include "io.h"
26#include "iob.h" 26#include "iob.h"
27#include "byte.h"
28#include "scan.h"
29#include "ip6.h" 27#include "ip6.h"
28#include "scan.h"
29#include "socket.h"
30 30
31/* Opentracker */ 31/* Opentracker */
32#include "trackerlogic.h"
33#include "ot_mutex.h"
34#include "ot_http.h"
35#include "ot_udp.h"
36#include "ot_accesslist.h" 32#include "ot_accesslist.h"
37#include "ot_stats.h" 33#include "ot_http.h"
38#include "ot_livesync.h" 34#include "ot_livesync.h"
35#include "ot_mutex.h"
36#include "ot_stats.h"
37#include "ot_udp.h"
38#include "trackerlogic.h"
39 39
40/* Globals */ 40/* Globals */
41time_t g_now_seconds; 41time_t g_now_seconds;
42char * g_redirecturl; 42char *g_redirecturl;
43uint32_t g_tracker_id; 43uint32_t g_tracker_id;
44volatile int g_opentracker_running = 1; 44volatile int g_opentracker_running = 1;
45int g_self_pipe[2]; 45int g_self_pipe[2];
46 46
47static char * g_serverdir; 47static char *g_serverdir;
48static char * g_serveruser; 48static char *g_serveruser;
49static unsigned int g_udp_workers; 49static unsigned int g_udp_workers;
50 50
51static void panic( const char *routine ) __attribute__ ((noreturn)); 51static void panic(const char *routine) __attribute__((noreturn));
52static void panic( const char *routine ) { 52static void panic(const char *routine) {
53 fprintf( stderr, "%s: %s\n", routine, strerror(errno) ); 53 fprintf(stderr, "%s: %s\n", routine, strerror(errno));
54 exit( 111 ); 54 exit(111);
55} 55}
56 56
57static void signal_handler( int s ) { 57static void signal_handler(int s) {
58 if( s == SIGINT ) { 58 if (s == SIGINT) {
59 /* Any new interrupt signal quits the application */ 59 /* Any new interrupt signal quits the application */
60 signal( SIGINT, SIG_DFL); 60 signal(SIGINT, SIG_DFL);
61 61
62 /* Tell all other threads to not acquire any new lock on a bucket 62 /* Tell all other threads to not acquire any new lock on a bucket
63 but cancel their operations and return */ 63 but cancel their operations and return */
@@ -69,61 +69,63 @@ static void signal_handler( int s ) {
69 closelog(); 69 closelog();
70#endif 70#endif
71 71
72 exit( 0 ); 72 exit(0);
73 } 73 }
74} 74}
75 75
76static void defaul_signal_handlers( void ) { 76static void defaul_signal_handlers(void) {
77 sigset_t signal_mask; 77 sigset_t signal_mask;
78 sigemptyset(&signal_mask); 78 sigemptyset(&signal_mask);
79 sigaddset (&signal_mask, SIGPIPE); 79 sigaddset(&signal_mask, SIGPIPE);
80 sigaddset (&signal_mask, SIGHUP); 80 sigaddset(&signal_mask, SIGHUP);
81 sigaddset (&signal_mask, SIGINT); 81 sigaddset(&signal_mask, SIGINT);
82 sigaddset (&signal_mask, SIGALRM); 82 sigaddset(&signal_mask, SIGALRM);
83 pthread_sigmask (SIG_BLOCK, &signal_mask, NULL); 83 pthread_sigmask(SIG_BLOCK, &signal_mask, NULL);
84} 84}
85 85
86static void install_signal_handlers( void ) { 86static void install_signal_handlers(void) {
87 struct sigaction sa; 87 struct sigaction sa;
88 sigset_t signal_mask; 88 sigset_t signal_mask;
89 sigemptyset(&signal_mask); 89 sigemptyset(&signal_mask);
90 90
91 sa.sa_handler = signal_handler; 91 sa.sa_handler = signal_handler;
92 sigemptyset(&sa.sa_mask); 92 sigemptyset(&sa.sa_mask);
93 sa.sa_flags = SA_RESTART; 93 sa.sa_flags = SA_RESTART;
94 if ((sigaction(SIGINT, &sa, NULL) == -1) || (sigaction(SIGALRM, &sa, NULL) == -1) ) 94 if ((sigaction(SIGINT, &sa, NULL) == -1) || (sigaction(SIGALRM, &sa, NULL) == -1))
95 panic( "install_signal_handlers" ); 95 panic("install_signal_handlers");
96 96
97 sigaddset (&signal_mask, SIGINT); 97 sigaddset(&signal_mask, SIGINT);
98 pthread_sigmask (SIG_UNBLOCK, &signal_mask, NULL); 98 pthread_sigmask(SIG_UNBLOCK, &signal_mask, NULL);
99} 99}
100 100
101static void usage( char *name ) { 101static void usage(char *name) {
102 fprintf( stderr, "Usage: %s [-i ip] [-p port] [-P port] [-r redirect] [-d dir] [-u user] [-A ip[/bits]] [-f config] [-s livesyncport]" 102 fprintf(stderr,
103 "Usage: %s [-i ip] [-p port] [-P port] [-r redirect] [-d dir] [-u user] [-A ip[/bits]] [-f config] [-s livesyncport]"
103#ifdef WANT_ACCESSLIST_BLACK 104#ifdef WANT_ACCESSLIST_BLACK
104 " [-b blacklistfile]" 105 " [-b blacklistfile]"
105#elif defined ( WANT_ACCESSLIST_WHITE ) 106#elif defined(WANT_ACCESSLIST_WHITE)
106 " [-w whitelistfile]" 107 " [-w whitelistfile]"
107#endif 108#endif
108 "\n", name ); 109 "\n",
110 name);
109} 111}
110 112
111#define HELPLINE(opt,desc) fprintf(stderr, "\t%-10s%s\n",opt,desc) 113#define HELPLINE(opt, desc) fprintf(stderr, "\t%-10s%s\n", opt, desc)
112static void help( char *name ) { 114static void help(char *name) {
113 usage( name ); 115 usage(name);
114 116
115 HELPLINE("-f config","include and execute the config file"); 117 HELPLINE("-f config", "include and execute the config file");
116 HELPLINE("-i ip","specify ip to bind to with next -[pP] (default: any, overrides preceeding ones)"); 118 HELPLINE("-i ip", "specify ip to bind to with next -[pP] (default: any, overrides preceeding ones)");
117 HELPLINE("-p port","do bind to tcp port (default: 6969, you may specify more than one)"); 119 HELPLINE("-p port", "do bind to tcp port (default: 6969, you may specify more than one)");
118 HELPLINE("-P port","do bind to udp port (default: 6969, you may specify more than one)"); 120 HELPLINE("-P port", "do bind to udp port (default: 6969, you may specify more than one)");
119 HELPLINE("-r redirecturl","specify url where / should be redirected to (default none)"); 121 HELPLINE("-r redirecturl", "specify url where / should be redirected to (default none)");
120 HELPLINE("-d dir","specify directory to try to chroot to (default: \".\")"); 122 HELPLINE("-d dir", "specify directory to try to chroot to (default: \".\")");
121 HELPLINE("-u user","specify user under whose privileges opentracker should run (default: \"nobody\")"); 123 HELPLINE("-u user", "specify user under whose privileges opentracker should run (default: \"nobody\")");
122 HELPLINE("-A ip[/bits]","bless an ip address or net as admin address (e.g. to allow syncs from this address)"); 124 HELPLINE("-A ip[/bits]", "bless an ip address or net as admin address (e.g. to allow syncs from this address)");
123#ifdef WANT_ACCESSLIST_BLACK 125#ifdef WANT_ACCESSLIST_BLACK
124 HELPLINE("-b file","specify blacklist file."); 126 HELPLINE("-b file", "specify blacklist file.");
125#elif defined( WANT_ACCESSLIST_WHITE ) 127#elif defined(WANT_ACCESSLIST_WHITE)
126 HELPLINE("-w file","specify whitelist file."); 128 HELPLINE("-w file", "specify whitelist file.");
127#endif 129#endif
128 130
129 fprintf(stderr, "\nExample: ./opentracker -i 127.0.0.1 -p 6969 -P 6969 -f ./opentracker.conf -i 10.1.1.23 -p 2710 -p 80\n"); 131 fprintf(stderr, "\nExample: ./opentracker -i 127.0.0.1 -p 6969 -P 6969 -f ./opentracker.conf -i 10.1.1.23 -p 2710 -p 80\n");
@@ -133,165 +135,165 @@ static void help( char *name ) {
133} 135}
134#undef HELPLINE 136#undef HELPLINE
135 137
136static ssize_t header_complete( char * request, ssize_t byte_count ) { 138static ssize_t header_complete(char *request, ssize_t byte_count) {
137 ssize_t i = 0, state = 0; 139 ssize_t i = 0, state = 0;
138 140
139 for( i=1; i < byte_count; i+=2 ) 141 for (i = 1; i < byte_count; i += 2)
140 if( request[i] <= 13 ) { 142 if (request[i] <= 13) {
141 i--; 143 i--;
142 for( state = 0 ; i < byte_count; ++i ) { 144 for (state = 0; i < byte_count; ++i) {
143 char c = request[i]; 145 char c = request[i];
144 if( c == '\r' || c == '\n' ) 146 if (c == '\r' || c == '\n')
145 state = ( state >> 2 ) | ( ( c << 6 ) & 0xc0 ); 147 state = (state >> 2) | ((c << 6) & 0xc0);
146 else 148 else
147 break; 149 break;
148 if( state >= 0xa0 || state == 0x99 ) return i + 1; 150 if (state >= 0xa0 || state == 0x99)
151 return i + 1;
149 } 152 }
150 } 153 }
151 return 0; 154 return 0;
152} 155}
153 156
154static void handle_dead( const int64 sock ) { 157static void handle_dead(const int64 sock) {
155 struct http_data* cookie=io_getcookie( sock ); 158 struct http_data *cookie = io_getcookie(sock);
156 if( cookie ) { 159 if (cookie) {
157 size_t i; 160 size_t i;
158 for ( i = 0; i < cookie->batches; ++i) 161 for (i = 0; i < cookie->batches; ++i)
159 iob_reset( cookie->batch + i ); 162 iob_reset(cookie->batch + i);
160 free( cookie->batch ); 163 free(cookie->batch);
161 array_reset( &cookie->request ); 164 array_reset(&cookie->request);
162 if( cookie->flag & (STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER) ) 165 if (cookie->flag & (STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER))
163 mutex_workqueue_canceltask( sock ); 166 mutex_workqueue_canceltask(sock);
164 free( cookie ); 167 free(cookie);
165 } 168 }
166 io_close( sock ); 169 io_close(sock);
167} 170}
168 171
169static void handle_read( const int64 sock, struct ot_workstruct *ws ) { 172static void handle_read(const int64 sock, struct ot_workstruct *ws) {
170 struct http_data* cookie = io_getcookie( sock ); 173 struct http_data *cookie = io_getcookie(sock);
171 ssize_t byte_count = io_tryread( sock, ws->inbuf, G_INBUF_SIZE ); 174 ssize_t byte_count = io_tryread(sock, ws->inbuf, G_INBUF_SIZE);
172 175
173 if( byte_count == 0 || byte_count == -3 ) { 176 if (byte_count == 0 || byte_count == -3) {
174 handle_dead( sock ); 177 handle_dead(sock);
175 return; 178 return;
176 } 179 }
177 180
178 if( byte_count == -1) 181 if (byte_count == -1)
179 return; 182 return;
180 183
181 /* If we get the whole request in one packet, handle it without copying */ 184 /* If we get the whole request in one packet, handle it without copying */
182 if( !array_start( &cookie->request ) ) { 185 if (!array_start(&cookie->request)) {
183 if( ( ws->header_size = header_complete( ws->inbuf, byte_count ) ) ) { 186 if ((ws->header_size = header_complete(ws->inbuf, byte_count))) {
184 ws->request = ws->inbuf; 187 ws->request = ws->inbuf;
185 ws->request_size = byte_count; 188 ws->request_size = byte_count;
186 http_handle_request( sock, ws ); 189 http_handle_request(sock, ws);
187 } else 190 } else
188 array_catb( &cookie->request, ws->inbuf, (size_t)byte_count ); 191 array_catb(&cookie->request, ws->inbuf, (size_t)byte_count);
189 return; 192 return;
190 } 193 }
191 194
192 array_catb( &cookie->request, ws->inbuf, byte_count ); 195 array_catb(&cookie->request, ws->inbuf, byte_count);
193 if( array_failed( &cookie->request ) || array_bytes( &cookie->request ) > 8192 ) { 196 if (array_failed(&cookie->request) || array_bytes(&cookie->request) > 8192) {
194 http_issue_error( sock, ws, CODE_HTTPERROR_500 ); 197 http_issue_error(sock, ws, CODE_HTTPERROR_500);
195 return; 198 return;
196 } 199 }
197 200
198 while( ( ws->header_size = header_complete( array_start( &cookie->request ), array_bytes( &cookie->request ) ) ) ) { 201 while ((ws->header_size = header_complete(array_start(&cookie->request), array_bytes(&cookie->request)))) {
199 ws->request = array_start( &cookie->request ); 202 ws->request = array_start(&cookie->request);
200 ws->request_size = array_bytes( &cookie->request ); 203 ws->request_size = array_bytes(&cookie->request);
201 http_handle_request( sock, ws ); 204 http_handle_request(sock, ws);
202#ifdef WANT_KEEPALIVE 205#ifdef WANT_KEEPALIVE
203 if( !ws->keep_alive ) 206 if (!ws->keep_alive)
204#endif 207#endif
205 return; 208 return;
206 } 209 }
207} 210}
208 211
209static void handle_write( const int64 sock ) { 212static void handle_write(const int64 sock) {
210 struct http_data* cookie=io_getcookie( sock ); 213 struct http_data *cookie = io_getcookie(sock);
211 size_t i; 214 size_t i;
212 int chunked = 0; 215 int chunked = 0;
213 216
214 /* Look for the first io_batch still containing bytes to write */ 217 /* Look for the first io_batch still containing bytes to write */
215 if( cookie ) { 218 if (cookie) {
216 if( cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER ) 219 if (cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER)
217 chunked = 1; 220 chunked = 1;
218 221
219 for( i = 0; i < cookie->batches; ++i ) { 222 for (i = 0; i < cookie->batches; ++i) {
220 if( cookie->batch[i].bytesleft ) { 223 if (cookie->batch[i].bytesleft) {
221 int64 res = iob_send( sock, cookie->batch + i ); 224 int64 res = iob_send(sock, cookie->batch + i);
222 225
223 if( res == -3 ) { 226 if (res == -3) {
224 handle_dead( sock ); 227 handle_dead(sock);
225 return; 228 return;
226 } 229 }
227 230
228 if( !cookie->batch[i].bytesleft ) 231 if (!cookie->batch[i].bytesleft)
229 continue; 232 continue;
230 233
231 if( res == -1 || res > 0 || i < cookie->batches - 1 ) 234 if (res == -1 || res > 0 || i < cookie->batches - 1)
232 return; 235 return;
233 } 236 }
234 } 237 }
235 } 238 }
236 239
237 /* In a chunked transfer after all batches accumulated have been sent, wait for the next one */ 240 /* In a chunked transfer after all batches accumulated have been sent, wait for the next one */
238 if( chunked ) 241 if (chunked)
239 io_dontwantwrite( sock ); 242 io_dontwantwrite(sock);
240 else 243 else
241 handle_dead( sock ); 244 handle_dead(sock);
242} 245}
243 246
244static void handle_accept( const int64 serversocket ) { 247static void handle_accept(const int64 serversocket) {
245 struct http_data *cookie; 248 struct http_data *cookie;
246 int64 sock; 249 int64 sock;
247 ot_ip6 ip; 250 ot_ip6 ip;
248 uint16 port; 251 uint16 port;
249 tai6464 t; 252 tai6464 t;
250 253
251 while( ( sock = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) { 254 while ((sock = socket_accept6(serversocket, ip, &port, NULL)) != -1) {
252 255
253 /* Put fd into a non-blocking mode */ 256 /* Put fd into a non-blocking mode */
254 io_nonblock( sock ); 257 io_nonblock(sock);
255 258
256 if( !io_fd( sock ) || 259 if (!io_fd(sock) || !(cookie = (struct http_data *)malloc(sizeof(struct http_data)))) {
257 !( cookie = (struct http_data*)malloc( sizeof(struct http_data) ) ) ) { 260 io_close(sock);
258 io_close( sock );
259 continue; 261 continue;
260 } 262 }
261 memset(cookie, 0, sizeof( struct http_data ) ); 263 memset(cookie, 0, sizeof(struct http_data));
262 memcpy(cookie->ip,ip,sizeof(ot_ip6)); 264 memcpy(cookie->ip, ip, sizeof(ot_ip6));
263 265
264 io_setcookie( sock, cookie ); 266 io_setcookie(sock, cookie);
265 io_wantread( sock ); 267 io_wantread(sock);
266 268
267 stats_issue_event( EVENT_ACCEPT, FLAG_TCP, (uintptr_t)ip); 269 stats_issue_event(EVENT_ACCEPT, FLAG_TCP, (uintptr_t)ip);
268 270
269 /* That breaks taia encapsulation. But there is no way to take system 271 /* That breaks taia encapsulation. But there is no way to take system
270 time this often in FreeBSD and libowfat does not allow to set unix time */ 272 time this often in FreeBSD and libowfat does not allow to set unix time */
271 taia_uint( &t, 0 ); /* Clear t */ 273 taia_uint(&t, 0); /* Clear t */
272 tai_unix( &(t.sec), (g_now_seconds + OT_CLIENT_TIMEOUT) ); 274 tai_unix(&(t.sec), (g_now_seconds + OT_CLIENT_TIMEOUT));
273 io_timeout( sock, t ); 275 io_timeout(sock, t);
274 } 276 }
275 io_eagain(serversocket); 277 io_eagain(serversocket);
276} 278}
277 279
278static void * server_mainloop( void * args ) { 280static void *server_mainloop(void *args) {
279 struct ot_workstruct ws; 281 struct ot_workstruct ws;
280 time_t next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL; 282 time_t next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL;
281 struct iovec *iovector; 283 struct iovec *iovector;
282 int iovec_entries, is_partial; 284 int iovec_entries, is_partial;
283 285
284 (void)args; 286 (void)args;
285 287
286 /* Initialize our "thread local storage" */ 288 /* Initialize our "thread local storage" */
287 ws.inbuf = malloc( G_INBUF_SIZE ); 289 ws.inbuf = malloc(G_INBUF_SIZE);
288 ws.outbuf = malloc( G_OUTBUF_SIZE ); 290 ws.outbuf = malloc(G_OUTBUF_SIZE);
289#ifdef _DEBUG_HTTPERROR 291#ifdef _DEBUG_HTTPERROR
290 ws.debugbuf= malloc( G_DEBUGBUF_SIZE ); 292 ws.debugbuf = malloc(G_DEBUGBUF_SIZE);
291#endif 293#endif
292 294
293 if( !ws.inbuf || !ws.outbuf ) 295 if (!ws.inbuf || !ws.outbuf)
294 panic( "Initializing worker failed" ); 296 panic("Initializing worker failed");
295 297
296#ifdef WANT_ARC4RANDOM 298#ifdef WANT_ARC4RANDOM
297 arc4random_buf(&ws.rand48_state[0], 3 * sizeof(uint16_t)); 299 arc4random_buf(&ws.rand48_state[0], 3 * sizeof(uint16_t));
@@ -301,32 +303,32 @@ static void * server_mainloop( void * args ) {
301 ws.rand48_state[2] = (uint16_t)random(); 303 ws.rand48_state[2] = (uint16_t)random();
302#endif 304#endif
303 305
304 for( ; ; ) { 306 for (;;) {
305 int64 sock; 307 int64 sock;
306 308
307 io_wait(); 309 io_wait();
308 310
309 while( ( sock = io_canread( ) ) != -1 ) { 311 while ((sock = io_canread()) != -1) {
310 const void *cookie = io_getcookie( sock ); 312 const void *cookie = io_getcookie(sock);
311 if( (intptr_t)cookie == FLAG_TCP ) 313 if ((intptr_t)cookie == FLAG_TCP)
312 handle_accept( sock ); 314 handle_accept(sock);
313 else if( (intptr_t)cookie == FLAG_UDP ) 315 else if ((intptr_t)cookie == FLAG_UDP)
314 handle_udp6( sock, &ws ); 316 handle_udp6(sock, &ws);
315 else if( (intptr_t)cookie == FLAG_SELFPIPE ) 317 else if ((intptr_t)cookie == FLAG_SELFPIPE)
316 io_tryread( sock, ws.inbuf, G_INBUF_SIZE ); 318 io_tryread(sock, ws.inbuf, G_INBUF_SIZE);
317 else 319 else
318 handle_read( sock, &ws ); 320 handle_read(sock, &ws);
319 } 321 }
320 322
321 while( ( sock = mutex_workqueue_popresult( &iovec_entries, &iovector, &is_partial ) ) != -1 ) 323 while ((sock = mutex_workqueue_popresult(&iovec_entries, &iovector, &is_partial)) != -1)
322 http_sendiovecdata( sock, &ws, iovec_entries, iovector, is_partial ); 324 http_sendiovecdata(sock, &ws, iovec_entries, iovector, is_partial);
323 325
324 while( ( sock = io_canwrite( ) ) != -1 ) 326 while ((sock = io_canwrite()) != -1)
325 handle_write( sock ); 327 handle_write(sock);
326 328
327 if( g_now_seconds > next_timeout_check ) { 329 if (g_now_seconds > next_timeout_check) {
328 while( ( sock = io_timeouted() ) != -1 ) 330 while ((sock = io_timeouted()) != -1)
329 handle_dead( sock ); 331 handle_dead(sock);
330 next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL; 332 next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL;
331 } 333 }
332 334
@@ -335,276 +337,298 @@ static void * server_mainloop( void * args ) {
335 return 0; 337 return 0;
336} 338}
337 339
338static int64_t ot_try_bind( ot_ip6 ip, uint16_t port, PROTO_FLAG proto ) { 340static int64_t ot_try_bind(ot_ip6 ip, uint16_t port, PROTO_FLAG proto) {
339 int64 sock = proto == FLAG_TCP ? socket_tcp6( ) : socket_udp6( ); 341 int64 sock = proto == FLAG_TCP ? socket_tcp6() : socket_udp6();
340 342
341#ifdef _DEBUG 343#ifdef _DEBUG
342 { 344 {
343 char *protos[] = {"TCP","UDP","UDP mcast"}; 345 char *protos[] = {"TCP", "UDP", "UDP mcast"};
344 char _debug[512]; 346 char _debug[512];
345 int off = snprintf( _debug, sizeof(_debug), "Binding socket type %s to address [", protos[proto] ); 347 int off = snprintf(_debug, sizeof(_debug), "Binding socket type %s to address [", protos[proto]);
346 off += fmt_ip6c( _debug+off, ip); 348 off += fmt_ip6c(_debug + off, ip);
347 snprintf( _debug + off, sizeof(_debug)-off, "]:%d...", port); 349 snprintf(_debug + off, sizeof(_debug) - off, "]:%d...", port);
348 fputs( _debug, stderr ); 350 fputs(_debug, stderr);
349 } 351 }
350#endif 352#endif
351 353
352 if( socket_bind6_reuse( sock, ip, port, 0 ) == -1 ) 354 if (socket_bind6_reuse(sock, ip, port, 0) == -1)
353 panic( "socket_bind6_reuse" ); 355 panic("socket_bind6_reuse");
354 356
355 if( ( proto == FLAG_TCP ) && ( socket_listen( sock, SOMAXCONN) == -1 ) ) 357 if ((proto == FLAG_TCP) && (socket_listen(sock, SOMAXCONN) == -1))
356 panic( "socket_listen" ); 358 panic("socket_listen");
357 359
358 if( !io_fd( sock ) ) 360 if (!io_fd(sock))
359 panic( "io_fd" ); 361 panic("io_fd");
360 362
361 io_setcookie( sock, (void*)proto ); 363 io_setcookie(sock, (void *)proto);
362 364
363 if( (proto == FLAG_UDP) && g_udp_workers ) { 365 if ((proto == FLAG_UDP) && g_udp_workers) {
364 io_block( sock ); 366 io_block(sock);
365 udp_init( sock, g_udp_workers ); 367 udp_init(sock, g_udp_workers);
366 } else 368 } else
367 io_wantread( sock ); 369 io_wantread(sock);
368 370
369#ifdef _DEBUG 371#ifdef _DEBUG
370 fputs( " success.\n", stderr); 372 fputs(" success.\n", stderr);
371#endif 373#endif
372 374
373 return sock; 375 return sock;
374} 376}
375 377
376char * set_config_option( char **option, char *value ) { 378char *set_config_option(char **option, char *value) {
377#ifdef _DEBUG 379#ifdef _DEBUG
378 fprintf( stderr, "Setting config option: %s\n", value ); 380 fprintf(stderr, "Setting config option: %s\n", value);
379#endif 381#endif
380 while( isspace(*value) ) ++value; 382 while (isspace(*value))
381 free( *option ); 383 ++value;
382 return *option = strdup( value ); 384 free(*option);
385 return *option = strdup(value);
383} 386}
384 387
385static int scan_ip6_port( const char *src, ot_ip6 ip, uint16 *port ) { 388static int scan_ip6_port(const char *src, ot_ip6 ip, uint16 *port) {
386 const char *s = src; 389 const char *s = src;
387 int off, bracket = 0; 390 int off, bracket = 0;
388 while( isspace(*s) ) ++s; 391 while (isspace(*s))
389 if( *s == '[' ) ++s, ++bracket; /* for v6 style notation */ 392 ++s;
390 if( !(off = scan_ip6( s, ip ) ) ) 393 if (*s == '[')
394 ++s, ++bracket; /* for v6 style notation */
395 if (!(off = scan_ip6(s, ip)))
391 return 0; 396 return 0;
392 s += off; 397 s += off;
393 if( bracket && *s == ']' ) ++s; 398 if (bracket && *s == ']')
394 if( *s == 0 || isspace(*s)) return s-src; 399 ++s;
395 if( !ip6_isv4mapped(ip)) { 400 if (*s == 0 || isspace(*s))
396 if( *s != ':' && *s != '.' ) return 0; 401 return s - src;
397 if( !bracket && *(s) == ':' ) return 0; 402 if (!ip6_isv4mapped(ip)) {
403 if (*s != ':' && *s != '.')
404 return 0;
405 if (!bracket && *(s) == ':')
406 return 0;
398 s++; 407 s++;
399 } else { 408 } else {
400 if( *(s++) != ':' ) return 0; 409 if (*(s++) != ':')
410 return 0;
401 } 411 }
402 if( !(off = scan_ushort (s, port ) ) ) 412 if (!(off = scan_ushort(s, port)))
403 return 0; 413 return 0;
404 return off+s-src; 414 return off + s - src;
405} 415}
406 416
407static int scan_ip6_net( const char *src, ot_net *net) { 417static int scan_ip6_net(const char *src, ot_net *net) {
408 const char *s = src; 418 const char *s = src;
409 int off; 419 int off;
410 while( isspace(*s) ) ++s; 420 while (isspace(*s))
411 if( !(off = scan_ip6( s, net->address ) ) ) 421 ++s;
422 if (!(off = scan_ip6(s, net->address)))
412 return 0; 423 return 0;
413 s += off; 424 s += off;
414 if(*s!='/') 425 if (*s != '/')
415 net->bits = 128; 426 net->bits = 128;
416 else { 427 else {
417 s++; 428 s++;
418 if( !(off = scan_int (s, &net->bits ) ) ) 429 if (!(off = scan_int(s, &net->bits)))
419 return 0; 430 return 0;
420 if( ip6_isv4mapped(net->address)) 431 if (ip6_isv4mapped(net->address))
421 net->bits += 96; 432 net->bits += 96;
422 if(net->bits > 128) 433 if (net->bits > 128)
423 return 0; 434 return 0;
424 s += off; 435 s += off;
425 } 436 }
426 return off+s-src; 437 return off + s - src;
427} 438}
428 439
429int parse_configfile( char * config_filename ) { 440int parse_configfile(char *config_filename) {
430 FILE * accesslist_filehandle; 441 FILE *accesslist_filehandle;
431 char inbuf[512]; 442 char inbuf[512];
432 ot_ip6 tmpip; 443 ot_ip6 tmpip;
433#if defined(WANT_RESTRICT_STATS) || defined(WANT_IP_FROM_PROXY) || defined(WANT_SYNC_LIVE) 444#if defined(WANT_RESTRICT_STATS) || defined(WANT_IP_FROM_PROXY) || defined(WANT_SYNC_LIVE)
434 ot_net tmpnet; 445 ot_net tmpnet;
435#endif 446#endif
436 int bound = 0; 447 int bound = 0;
437 448
438 accesslist_filehandle = fopen( config_filename, "r" ); 449 accesslist_filehandle = fopen(config_filename, "r");
439 450
440 if( accesslist_filehandle == NULL ) { 451 if (accesslist_filehandle == NULL) {
441 fprintf( stderr, "Warning: Can't open config file: %s.", config_filename ); 452 fprintf(stderr, "Warning: Can't open config file: %s.", config_filename);
442 return 0; 453 return 0;
443 } 454 }
444 455
445 while( fgets( inbuf, sizeof(inbuf), accesslist_filehandle ) ) { 456 while (fgets(inbuf, sizeof(inbuf), accesslist_filehandle)) {
446 char *p = inbuf; 457 char *p = inbuf;
447 size_t strl; 458 size_t strl;
448 459
449 /* Skip white spaces */ 460 /* Skip white spaces */
450 while(isspace(*p)) ++p; 461 while (isspace(*p))
462 ++p;
451 463
452 /* Ignore comments and empty lines */ 464 /* Ignore comments and empty lines */
453 if((*p=='#')||(*p=='\n')||(*p==0)) continue; 465 if ((*p == '#') || (*p == '\n') || (*p == 0))
466 continue;
454 467
455 /* consume trailing new lines and spaces */ 468 /* consume trailing new lines and spaces */
456 strl = strlen(p); 469 strl = strlen(p);
457 while( strl && isspace(p[strl-1])) 470 while (strl && isspace(p[strl - 1]))
458 p[--strl] = 0; 471 p[--strl] = 0;
459 472
460 /* Scan for commands */ 473 /* Scan for commands */
461 if(!byte_diff(p,15,"tracker.rootdir" ) && isspace(p[15])) { 474 if (!byte_diff(p, 15, "tracker.rootdir") && isspace(p[15])) {
462 set_config_option( &g_serverdir, p+16 ); 475 set_config_option(&g_serverdir, p + 16);
463 } else if(!byte_diff(p,12,"tracker.user" ) && isspace(p[12])) { 476 } else if (!byte_diff(p, 12, "tracker.user") && isspace(p[12])) {
464 set_config_option( &g_serveruser, p+13 ); 477 set_config_option(&g_serveruser, p + 13);
465 } else if(!byte_diff(p,14,"listen.tcp_udp" ) && isspace(p[14])) { 478 } else if (!byte_diff(p, 14, "listen.tcp_udp") && isspace(p[14])) {
466 uint16_t tmpport = 6969; 479 uint16_t tmpport = 6969;
467 if( !scan_ip6_port( p+15, tmpip, &tmpport )) goto parse_error; 480 if (!scan_ip6_port(p + 15, tmpip, &tmpport))
468 ot_try_bind( tmpip, tmpport, FLAG_TCP ); ++bound; 481 goto parse_error;
469 ot_try_bind( tmpip, tmpport, FLAG_UDP ); ++bound; 482 ot_try_bind(tmpip, tmpport, FLAG_TCP);
470 } else if(!byte_diff(p,10,"listen.tcp" ) && isspace(p[10])) { 483 ++bound;
484 ot_try_bind(tmpip, tmpport, FLAG_UDP);
485 ++bound;
486 } else if (!byte_diff(p, 10, "listen.tcp") && isspace(p[10])) {
471 uint16_t tmpport = 6969; 487 uint16_t tmpport = 6969;
472 if( !scan_ip6_port( p+11, tmpip, &tmpport )) goto parse_error; 488 if (!scan_ip6_port(p + 11, tmpip, &tmpport))
473 ot_try_bind( tmpip, tmpport, FLAG_TCP ); 489 goto parse_error;
490 ot_try_bind(tmpip, tmpport, FLAG_TCP);
474 ++bound; 491 ++bound;
475 } else if(!byte_diff(p, 10, "listen.udp" ) && isspace(p[10])) { 492 } else if (!byte_diff(p, 10, "listen.udp") && isspace(p[10])) {
476 uint16_t tmpport = 6969; 493 uint16_t tmpport = 6969;
477 if( !scan_ip6_port( p+11, tmpip, &tmpport )) goto parse_error; 494 if (!scan_ip6_port(p + 11, tmpip, &tmpport))
478 ot_try_bind( tmpip, tmpport, FLAG_UDP ); 495 goto parse_error;
496 ot_try_bind(tmpip, tmpport, FLAG_UDP);
479 ++bound; 497 ++bound;
480 } else if(!byte_diff(p,18,"listen.udp.workers" ) && isspace(p[18])) { 498 } else if (!byte_diff(p, 18, "listen.udp.workers") && isspace(p[18])) {
481 char *value = p + 18; 499 char *value = p + 18;
482 while( isspace(*value) ) ++value; 500 while (isspace(*value))
483 scan_uint( value, &g_udp_workers ); 501 ++value;
502 scan_uint(value, &g_udp_workers);
484#ifdef WANT_ACCESSLIST_WHITE 503#ifdef WANT_ACCESSLIST_WHITE
485 } else if(!byte_diff(p, 16, "access.whitelist" ) && isspace(p[16])) { 504 } else if (!byte_diff(p, 16, "access.whitelist") && isspace(p[16])) {
486 set_config_option( &g_accesslist_filename, p+17 ); 505 set_config_option(&g_accesslist_filename, p + 17);
487#elif defined( WANT_ACCESSLIST_BLACK ) 506#elif defined(WANT_ACCESSLIST_BLACK)
488 } else if(!byte_diff(p, 16, "access.blacklist" ) && isspace(p[16])) { 507 } else if (!byte_diff(p, 16, "access.blacklist") && isspace(p[16])) {
489 set_config_option( &g_accesslist_filename, p+17 ); 508 set_config_option(&g_accesslist_filename, p + 17);
490#endif 509#endif
491#ifdef WANT_DYNAMIC_ACCESSLIST 510#ifdef WANT_DYNAMIC_ACCESSLIST
492 } else if(!byte_diff(p, 15, "access.fifo_add" ) && isspace(p[15])) { 511 } else if (!byte_diff(p, 15, "access.fifo_add") && isspace(p[15])) {
493 set_config_option( &g_accesslist_pipe_add, p+16 ); 512 set_config_option(&g_accesslist_pipe_add, p + 16);
494 } else if(!byte_diff(p, 18, "access.fifo_delete" ) && isspace(p[18])) { 513 } else if (!byte_diff(p, 18, "access.fifo_delete") && isspace(p[18])) {
495 set_config_option( &g_accesslist_pipe_delete, p+19 ); 514 set_config_option(&g_accesslist_pipe_delete, p + 19);
496#endif 515#endif
497#ifdef WANT_RESTRICT_STATS 516#ifdef WANT_RESTRICT_STATS
498 } else if(!byte_diff(p, 12, "access.stats" ) && isspace(p[12])) { 517 } else if (!byte_diff(p, 12, "access.stats") && isspace(p[12])) {
499 if( !scan_ip6_net( p+13, &tmpnet )) goto parse_error; 518 if (!scan_ip6_net(p + 13, &tmpnet))
500 accesslist_bless_net( &tmpnet, OT_PERMISSION_MAY_STAT ); 519 goto parse_error;
520 accesslist_bless_net(&tmpnet, OT_PERMISSION_MAY_STAT);
501#endif 521#endif
502 } else if(!byte_diff(p, 17, "access.stats_path" ) && isspace(p[17])) { 522 } else if (!byte_diff(p, 17, "access.stats_path") && isspace(p[17])) {
503 set_config_option( &g_stats_path, p+18 ); 523 set_config_option(&g_stats_path, p + 18);
504#ifdef WANT_IP_FROM_PROXY 524#ifdef WANT_IP_FROM_PROXY
505 } else if(!byte_diff(p, 12, "access.proxy" ) && isspace(p[12])) { 525 } else if (!byte_diff(p, 12, "access.proxy") && isspace(p[12])) {
506 if( !scan_ip6_net( p+13, &tmpnet )) goto parse_error; 526 if (!scan_ip6_net(p + 13, &tmpnet))
507 accesslist_bless_net( &tmpnet, OT_PERMISSION_MAY_PROXY ); 527 goto parse_error;
528 accesslist_bless_net(&tmpnet, OT_PERMISSION_MAY_PROXY);
508#endif 529#endif
509 } else if(!byte_diff(p, 20, "tracker.redirect_url" ) && isspace(p[20])) { 530 } else if (!byte_diff(p, 20, "tracker.redirect_url") && isspace(p[20])) {
510 set_config_option( &g_redirecturl, p+21 ); 531 set_config_option(&g_redirecturl, p + 21);
511#ifdef WANT_SYNC_LIVE 532#ifdef WANT_SYNC_LIVE
512 } else if(!byte_diff(p, 24, "livesync.cluster.node_ip" ) && isspace(p[24])) { 533 } else if (!byte_diff(p, 24, "livesync.cluster.node_ip") && isspace(p[24])) {
513 if( !scan_ip6_net( p+25, &tmpnet )) goto parse_error; 534 if (!scan_ip6_net(p + 25, &tmpnet))
514 accesslist_bless_net( &tmpnet, OT_PERMISSION_MAY_LIVESYNC ); 535 goto parse_error;
515 } else if(!byte_diff(p, 23, "livesync.cluster.listen" ) && isspace(p[23])) { 536 accesslist_bless_net(&tmpnet, OT_PERMISSION_MAY_LIVESYNC);
537 } else if (!byte_diff(p, 23, "livesync.cluster.listen") && isspace(p[23])) {
516 uint16_t tmpport = LIVESYNC_PORT; 538 uint16_t tmpport = LIVESYNC_PORT;
517 if( !scan_ip6_port( p+24, tmpip, &tmpport )) goto parse_error; 539 if (!scan_ip6_port(p + 24, tmpip, &tmpport))
518 livesync_bind_mcast( tmpip, tmpport ); 540 goto parse_error;
541 livesync_bind_mcast(tmpip, tmpport);
519#endif 542#endif
520 } else 543 } else
521 fprintf( stderr, "Unhandled line in config file: %s\n", inbuf ); 544 fprintf(stderr, "Unhandled line in config file: %s\n", inbuf);
522 continue; 545 continue;
523 parse_error: 546 parse_error:
524 fprintf( stderr, "Parse error in config file: %s\n", inbuf); 547 fprintf(stderr, "Parse error in config file: %s\n", inbuf);
525 } 548 }
526 fclose( accesslist_filehandle ); 549 fclose(accesslist_filehandle);
527 return bound; 550 return bound;
528} 551}
529 552
530void load_state(const char * const state_filename ) { 553void load_state(const char *const state_filename) {
531 FILE * state_filehandle; 554 FILE *state_filehandle;
532 char inbuf[512]; 555 char inbuf[512];
533 ot_hash infohash; 556 ot_hash infohash;
534 unsigned long long base, downcount; 557 unsigned long long base, downcount;
535 int consumed; 558 int consumed;
536 559
537 state_filehandle = fopen( state_filename, "r" ); 560 state_filehandle = fopen(state_filename, "r");
538 561
539 if( state_filehandle == NULL ) { 562 if (state_filehandle == NULL) {
540 fprintf( stderr, "Warning: Can't open config file: %s.", state_filename ); 563 fprintf(stderr, "Warning: Can't open config file: %s.", state_filename);
541 return; 564 return;
542 } 565 }
543 566
544 /* We do ignore anything that is not of the form "^[:xdigit:]:\d+:\d+" */ 567 /* We do ignore anything that is not of the form "^[:xdigit:]:\d+:\d+" */
545 while( fgets( inbuf, sizeof(inbuf), state_filehandle ) ) { 568 while (fgets(inbuf, sizeof(inbuf), state_filehandle)) {
546 int i; 569 int i;
547 for( i=0; i<(int)sizeof(ot_hash); ++i ) { 570 for (i = 0; i < (int)sizeof(ot_hash); ++i) {
548 int eger = 16 * scan_fromhex( inbuf[ 2*i ] ) + scan_fromhex( inbuf[ 1 + 2*i ] ); 571 int eger = 16 * scan_fromhex(inbuf[2 * i]) + scan_fromhex(inbuf[1 + 2 * i]);
549 if( eger < 0 ) 572 if (eger < 0)
550 continue; 573 continue;
551 infohash[i] = eger; 574 infohash[i] = eger;
552 } 575 }
553 576
554 if( i != (int)sizeof(ot_hash) ) continue; 577 if (i != (int)sizeof(ot_hash))
578 continue;
555 i *= 2; 579 i *= 2;
556 580
557 if( inbuf[ i++ ] != ':' || !( consumed = scan_ulonglong( inbuf+i, &base ) ) ) continue; 581 if (inbuf[i++] != ':' || !(consumed = scan_ulonglong(inbuf + i, &base)))
582 continue;
558 i += consumed; 583 i += consumed;
559 if( inbuf[ i++ ] != ':' || !( consumed = scan_ulonglong( inbuf+i, &downcount ) ) ) continue; 584 if (inbuf[i++] != ':' || !(consumed = scan_ulonglong(inbuf + i, &downcount)))
560 add_torrent_from_saved_state( infohash, base, downcount ); 585 continue;
586 add_torrent_from_saved_state(infohash, base, downcount);
561 } 587 }
562 588
563 fclose( state_filehandle ); 589 fclose(state_filehandle);
564} 590}
565 591
566int drop_privileges ( const char * const serveruser, const char * const serverdir ) { 592int drop_privileges(const char *const serveruser, const char *const serverdir) {
567 struct passwd *pws = NULL; 593 struct passwd *pws = NULL;
568 594
569#ifdef _DEBUG 595#ifdef _DEBUG
570 if( !geteuid() ) 596 if (!geteuid())
571 fprintf( stderr, "Dropping to user %s.\n", serveruser ); 597 fprintf(stderr, "Dropping to user %s.\n", serveruser);
572 if( serverdir ) 598 if (serverdir)
573 fprintf( stderr, "ch%s'ing to directory %s.\n", geteuid() ? "dir" : "root", serverdir ); 599 fprintf(stderr, "ch%s'ing to directory %s.\n", geteuid() ? "dir" : "root", serverdir);
574#endif 600#endif
575 601
576 /* Grab pws entry before chrooting */ 602 /* Grab pws entry before chrooting */
577 pws = getpwnam( serveruser ); 603 pws = getpwnam(serveruser);
578 endpwent(); 604 endpwent();
579 605
580 if( geteuid() == 0 ) { 606 if (geteuid() == 0) {
581 /* Running as root: chroot and drop privileges */ 607 /* Running as root: chroot and drop privileges */
582 if( serverdir && chroot( serverdir ) ) { 608 if (serverdir && chroot(serverdir)) {
583 fprintf( stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno) ); 609 fprintf(stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno));
584 return -1; 610 return -1;
585 } 611 }
586 612
587 if(chdir("/")) 613 if (chdir("/"))
588 panic("chdir() failed after chrooting: "); 614 panic("chdir() failed after chrooting: ");
589 615
590 /* If we can't find server user, revert to nobody's default uid */ 616 /* If we can't find server user, revert to nobody's default uid */
591 if( !pws ) { 617 if (!pws) {
592 fprintf( stderr, "Warning: Could not get password entry for %s. Reverting to uid -2.\n", serveruser ); 618 fprintf(stderr, "Warning: Could not get password entry for %s. Reverting to uid -2.\n", serveruser);
593 if (setegid( (gid_t)-2 ) || setgid( (gid_t)-2 ) || setuid( (uid_t)-2 ) || seteuid( (uid_t)-2 )) 619 if (setegid((gid_t)-2) || setgid((gid_t)-2) || setuid((uid_t)-2) || seteuid((uid_t)-2))
594 panic("Could not set uid to value -2"); 620 panic("Could not set uid to value -2");
595 } 621 } else {
596 else { 622 if (setegid(pws->pw_gid) || setgid(pws->pw_gid) || setuid(pws->pw_uid) || seteuid(pws->pw_uid))
597 if (setegid( pws->pw_gid ) || setgid( pws->pw_gid ) || setuid( pws->pw_uid ) || seteuid( pws->pw_uid ))
598 panic("Could not set uid to specified value"); 623 panic("Could not set uid to specified value");
599 } 624 }
600 625
601 if( geteuid() == 0 || getegid() == 0 ) 626 if (geteuid() == 0 || getegid() == 0)
602 panic("Still running with root privileges?!"); 627 panic("Still running with root privileges?!");
603 } 628 } else {
604 else {
605 /* Normal user, just chdir() */ 629 /* Normal user, just chdir() */
606 if( serverdir && chdir( serverdir ) ) { 630 if (serverdir && chdir(serverdir)) {
607 fprintf( stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno) ); 631 fprintf(stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno));
608 return -1; 632 return -1;
609 } 633 }
610 } 634 }
@@ -613,7 +637,7 @@ int drop_privileges ( const char * const serveruser, const char * const serverdi
613} 637}
614 638
615/* Maintain our copy of the clock. time() on BSDs is very expensive. */ 639/* Maintain our copy of the clock. time() on BSDs is very expensive. */
616static void *time_caching_worker(void*args) { 640static void *time_caching_worker(void *args) {
617 (void)args; 641 (void)args;
618 while (1) { 642 while (1) {
619 g_now_seconds = time(NULL); 643 g_now_seconds = time(NULL);
@@ -621,105 +645,146 @@ static void *time_caching_worker(void*args) {
621 } 645 }
622} 646}
623 647
624int main( int argc, char **argv ) { 648int main(int argc, char **argv) {
625 ot_ip6 serverip; 649 ot_ip6 serverip;
626 ot_net tmpnet; 650 ot_net tmpnet;
627 int bound = 0, scanon = 1; 651 int bound = 0, scanon = 1;
628 uint16_t tmpport; 652 uint16_t tmpport;
629 char * statefile = 0; 653 char *statefile = 0;
630 pthread_t thread_id; /* time cacher */ 654 pthread_t thread_id; /* time cacher */
631 655
632 memset( serverip, 0, sizeof(ot_ip6) ); 656 memset(serverip, 0, sizeof(ot_ip6));
633#ifdef WANT_V4_ONLY 657#ifdef WANT_V4_ONLY
634 serverip[10]=serverip[11]=-1; 658 serverip[10] = serverip[11] = -1;
635#endif 659#endif
636 660
637#ifdef WANT_DEV_RANDOM 661#ifdef WANT_DEV_RANDOM
638 srandomdev(); 662 srandomdev();
639#else 663#else
640 srandom( time(NULL) ); 664 srandom(time(NULL));
641#endif 665#endif
642 666
643 while( scanon ) { 667 while (scanon) {
644 switch( getopt( argc, argv, ":i:p:A:P:d:u:r:s:f:l:v" 668 switch (getopt(argc, argv,
669 ":i:p:A:P:d:u:r:s:f:l:v"
645#ifdef WANT_ACCESSLIST_BLACK 670#ifdef WANT_ACCESSLIST_BLACK
646"b:" 671 "b:"
647#elif defined( WANT_ACCESSLIST_WHITE ) 672#elif defined(WANT_ACCESSLIST_WHITE)
648"w:" 673 "w:"
649#endif 674#endif
650 "h" ) ) { 675 "h")) {
651 case -1 : scanon = 0; break; 676 case -1:
652 case 'i': 677 scanon = 0;
653 if( !scan_ip6( optarg, serverip )) { usage( argv[0] ); exit( 1 ); } 678 break;
654 break; 679 case 'i':
680 if (!scan_ip6(optarg, serverip)) {
681 usage(argv[0]);
682 exit(1);
683 }
684 break;
655#ifdef WANT_ACCESSLIST_BLACK 685#ifdef WANT_ACCESSLIST_BLACK
656 case 'b': set_config_option( &g_accesslist_filename, optarg); break; 686 case 'b':
657#elif defined( WANT_ACCESSLIST_WHITE ) 687 set_config_option(&g_accesslist_filename, optarg);
658 case 'w': set_config_option( &g_accesslist_filename, optarg); break; 688 break;
689#elif defined(WANT_ACCESSLIST_WHITE)
690 case 'w':
691 set_config_option(&g_accesslist_filename, optarg);
692 break;
659#endif 693#endif
660 case 'p': 694 case 'p':
661 if( !scan_ushort( optarg, &tmpport)) { usage( argv[0] ); exit( 1 ); } 695 if (!scan_ushort(optarg, &tmpport)) {
662 ot_try_bind( serverip, tmpport, FLAG_TCP ); bound++; break; 696 usage(argv[0]);
663 case 'P': 697 exit(1);
664 if( !scan_ushort( optarg, &tmpport)) { usage( argv[0] ); exit( 1 ); } 698 }
665 ot_try_bind( serverip, tmpport, FLAG_UDP ); bound++; break; 699 ot_try_bind(serverip, tmpport, FLAG_TCP);
700 bound++;
701 break;
702 case 'P':
703 if (!scan_ushort(optarg, &tmpport)) {
704 usage(argv[0]);
705 exit(1);
706 }
707 ot_try_bind(serverip, tmpport, FLAG_UDP);
708 bound++;
709 break;
666#ifdef WANT_SYNC_LIVE 710#ifdef WANT_SYNC_LIVE
667 case 's': 711 case 's':
668 if( !scan_ushort( optarg, &tmpport)) { usage( argv[0] ); exit( 1 ); } 712 if (!scan_ushort(optarg, &tmpport)) {
669 livesync_bind_mcast( serverip, tmpport); break; 713 usage(argv[0]);
714 exit(1);
715 }
716 livesync_bind_mcast(serverip, tmpport);
717 break;
670#endif 718#endif
671 case 'd': set_config_option( &g_serverdir, optarg ); break; 719 case 'd':
672 case 'u': set_config_option( &g_serveruser, optarg ); break; 720 set_config_option(&g_serverdir, optarg);
673 case 'r': set_config_option( &g_redirecturl, optarg ); break; 721 break;
674 case 'l': statefile = optarg; break; 722 case 'u':
675 case 'A': 723 set_config_option(&g_serveruser, optarg);
676 if( !scan_ip6_net( optarg, &tmpnet )) { usage( argv[0] ); exit( 1 ); } 724 break;
677 accesslist_bless_net( &tmpnet, 0xffff ); /* Allow everything for now */ 725 case 'r':
678 break; 726 set_config_option(&g_redirecturl, optarg);
679 case 'f': bound += parse_configfile( optarg ); break; 727 break;
680 case 'h': help( argv[0] ); exit( 0 ); 728 case 'l':
681 case 'v': { 729 statefile = optarg;
682 char buffer[8192]; 730 break;
683 stats_return_tracker_version( buffer ); 731 case 'A':
684 fputs( buffer, stderr ); 732 if (!scan_ip6_net(optarg, &tmpnet)) {
685 exit( 0 ); 733 usage(argv[0]);
734 exit(1);
686 } 735 }
687 default: 736 accesslist_bless_net(&tmpnet, 0xffff); /* Allow everything for now */
688 case '?': usage( argv[0] ); exit( 1 ); 737 break;
738 case 'f':
739 bound += parse_configfile(optarg);
740 break;
741 case 'h':
742 help(argv[0]);
743 exit(0);
744 case 'v': {
745 char buffer[8192];
746 stats_return_tracker_version(buffer);
747 fputs(buffer, stderr);
748 exit(0);
749 }
750 default:
751 case '?':
752 usage(argv[0]);
753 exit(1);
689 } 754 }
690 } 755 }
691 756
692 /* Bind to our default tcp/udp ports */ 757 /* Bind to our default tcp/udp ports */
693 if( !bound) { 758 if (!bound) {
694 ot_try_bind( serverip, 6969, FLAG_TCP ); 759 ot_try_bind(serverip, 6969, FLAG_TCP);
695 ot_try_bind( serverip, 6969, FLAG_UDP ); 760 ot_try_bind(serverip, 6969, FLAG_UDP);
696 } 761 }
697 762
698#ifdef WANT_SYSLOGS 763#ifdef WANT_SYSLOGS
699 openlog( "opentracker", 0, LOG_USER ); 764 openlog("opentracker", 0, LOG_USER);
700 setlogmask(LOG_UPTO(LOG_INFO)); 765 setlogmask(LOG_UPTO(LOG_INFO));
701#endif 766#endif
702 767
703 if( drop_privileges( g_serveruser ? g_serveruser : "nobody", g_serverdir ) == -1 ) 768 if (drop_privileges(g_serveruser ? g_serveruser : "nobody", g_serverdir) == -1)
704 panic( "drop_privileges failed, exiting. Last error"); 769 panic("drop_privileges failed, exiting. Last error");
705 770
706 g_now_seconds = time( NULL ); 771 g_now_seconds = time(NULL);
707 pthread_create( &thread_id, NULL, time_caching_worker, NULL); 772 pthread_create(&thread_id, NULL, time_caching_worker, NULL);
708 773
709 /* Create our self pipe which allows us to interrupt mainloops 774 /* Create our self pipe which allows us to interrupt mainloops
710 io_wait in case some data is available to send out */ 775 io_wait in case some data is available to send out */
711 if( pipe( g_self_pipe ) == -1 ) 776 if (pipe(g_self_pipe) == -1)
712 panic( "selfpipe failed: " ); 777 panic("selfpipe failed: ");
713 if( !io_fd( g_self_pipe[0] ) ) 778 if (!io_fd(g_self_pipe[0]))
714 panic( "selfpipe io_fd failed: " ); 779 panic("selfpipe io_fd failed: ");
715 if( !io_fd( g_self_pipe[1] ) ) 780 if (!io_fd(g_self_pipe[1]))
716 panic( "selfpipe io_fd failed: " ); 781 panic("selfpipe io_fd failed: ");
717 io_setcookie( g_self_pipe[0], (void*)FLAG_SELFPIPE ); 782 io_setcookie(g_self_pipe[0], (void *)FLAG_SELFPIPE);
718 io_wantread( g_self_pipe[0] ); 783 io_wantread(g_self_pipe[0]);
719 784
720 defaul_signal_handlers( ); 785 defaul_signal_handlers();
721 /* Init all sub systems. This call may fail with an exit() */ 786 /* Init all sub systems. This call may fail with an exit() */
722 trackerlogic_init( ); 787 trackerlogic_init();
723 788
724#ifdef _DEBUG_RANDOMTORRENTS 789#ifdef _DEBUG_RANDOMTORRENTS
725 fprintf(stderr, "DEBUG: Generating %d random peers on random torrents. This may take a while. (Setting RANDOMTORRENTS in trackerlogic.h)\n", RANDOMTORRENTS); 790 fprintf(stderr, "DEBUG: Generating %d random peers on random torrents. This may take a while. (Setting RANDOMTORRENTS in trackerlogic.h)\n", RANDOMTORRENTS);
@@ -727,15 +792,15 @@ int main( int argc, char **argv ) {
727 fprintf(stderr, "... done.\n"); 792 fprintf(stderr, "... done.\n");
728#endif 793#endif
729 794
730 if( statefile ) 795 if (statefile)
731 load_state( statefile ); 796 load_state(statefile);
732 797
733 install_signal_handlers( ); 798 install_signal_handlers();
734 799
735 if( !g_udp_workers ) 800 if (!g_udp_workers)
736 udp_init( -1, 0 ); 801 udp_init(-1, 0);
737 802
738 server_mainloop( 0 ); 803 server_mainloop(0);
739 804
740 return 0; 805 return 0;
741} 806}
diff --git a/ot_accesslist.c b/ot_accesslist.c
index 1badc25..c26e14a 100644
--- a/ot_accesslist.c
+++ b/ot_accesslist.c
@@ -5,35 +5,35 @@
5 5
6/* System */ 6/* System */
7#include <pthread.h> 7#include <pthread.h>
8#include <signal.h>
9#include <stdio.h>
8#include <stdlib.h> 10#include <stdlib.h>
9#include <string.h> 11#include <string.h>
10#include <stdio.h>
11#include <signal.h>
12#include <unistd.h> 12#include <unistd.h>
13#ifdef WANT_DYNAMIC_ACCESSLIST 13#ifdef WANT_DYNAMIC_ACCESSLIST
14#include <sys/types.h>
15#include <sys/stat.h>
16#include <errno.h> 14#include <errno.h>
15#include <sys/stat.h>
16#include <sys/types.h>
17#endif 17#endif
18 18
19/* Libowfat */ 19/* Libowfat */
20#include "byte.h" 20#include "byte.h"
21#include "scan.h" 21#include "fmt.h"
22#include "ip6.h" 22#include "ip6.h"
23#include "mmap.h" 23#include "mmap.h"
24#include "fmt.h" 24#include "scan.h"
25 25
26/* Opentracker */ 26/* Opentracker */
27#include "trackerlogic.h"
28#include "ot_accesslist.h" 27#include "ot_accesslist.h"
29#include "ot_vector.h" 28#include "ot_vector.h"
29#include "trackerlogic.h"
30 30
31/* GLOBAL VARIABLES */ 31/* GLOBAL VARIABLES */
32#ifdef WANT_ACCESSLIST 32#ifdef WANT_ACCESSLIST
33char *g_accesslist_filename = NULL; 33char *g_accesslist_filename = NULL;
34#ifdef WANT_DYNAMIC_ACCESSLIST 34#ifdef WANT_DYNAMIC_ACCESSLIST
35char *g_accesslist_pipe_add = NULL; 35char *g_accesslist_pipe_add = NULL;
36char *g_accesslist_pipe_delete = NULL; 36char *g_accesslist_pipe_delete = NULL;
37#endif 37#endif
38static pthread_mutex_t g_accesslist_mutex; 38static pthread_mutex_t g_accesslist_mutex;
39 39
@@ -55,20 +55,18 @@ struct ot_accesslist {
55 ot_time base; 55 ot_time base;
56 ot_accesslist *next; 56 ot_accesslist *next;
57}; 57};
58static ot_accesslist * _Atomic g_accesslist = NULL; 58static ot_accesslist *_Atomic g_accesslist = NULL;
59#ifdef WANT_DYNAMIC_ACCESSLIST 59#ifdef WANT_DYNAMIC_ACCESSLIST
60static ot_accesslist * _Atomic g_accesslist_add = NULL; 60static ot_accesslist *_Atomic g_accesslist_add = NULL;
61static ot_accesslist * _Atomic g_accesslist_delete = NULL; 61static ot_accesslist *_Atomic g_accesslist_delete = NULL;
62#endif 62#endif
63 63
64/* Helpers to work on access lists */ 64/* Helpers to work on access lists */
65static int vector_compare_hash(const void *hash1, const void *hash2 ) { 65static int vector_compare_hash(const void *hash1, const void *hash2) { return memcmp(hash1, hash2, OT_HASH_COMPARE_SIZE); }
66 return memcmp( hash1, hash2, OT_HASH_COMPARE_SIZE );
67}
68 66
69static ot_accesslist * accesslist_free(ot_accesslist *accesslist) { 67static ot_accesslist *accesslist_free(ot_accesslist *accesslist) {
70 while (accesslist) { 68 while (accesslist) {
71 ot_accesslist * this_accesslist = accesslist; 69 ot_accesslist *this_accesslist = accesslist;
72 accesslist = this_accesslist->next; 70 accesslist = this_accesslist->next;
73 free(this_accesslist->list); 71 free(this_accesslist->list);
74 free(this_accesslist); 72 free(this_accesslist);
@@ -76,8 +74,8 @@ static ot_accesslist * accesslist_free(ot_accesslist *accesslist) {
76 return NULL; 74 return NULL;
77} 75}
78 76
79static ot_accesslist * accesslist_make(ot_accesslist *next, size_t size) { 77static ot_accesslist *accesslist_make(ot_accesslist *next, size_t size) {
80 ot_accesslist * accesslist_new = malloc(sizeof(ot_accesslist)); 78 ot_accesslist *accesslist_new = malloc(sizeof(ot_accesslist));
81 if (accesslist_new) { 79 if (accesslist_new) {
82 accesslist_new->list = size ? malloc(sizeof(ot_hash) * size) : NULL; 80 accesslist_new->list = size ? malloc(sizeof(ot_hash) * size) : NULL;
83 accesslist_new->size = size; 81 accesslist_new->size = size;
@@ -102,76 +100,77 @@ static void accesslist_clean(ot_accesslist *accesslist) {
102} 100}
103 101
104/* Read initial access list */ 102/* Read initial access list */
105static void accesslist_readfile( void ) { 103static void accesslist_readfile(void) {
106 ot_accesslist * accesslist_new; 104 ot_accesslist *accesslist_new;
107 ot_hash *info_hash; 105 ot_hash *info_hash;
108 const char *map, *map_end, *read_offs; 106 const char *map, *map_end, *read_offs;
109 size_t maplen; 107 size_t maplen;
110 108
111 if( ( map = mmap_read( g_accesslist_filename, &maplen ) ) == NULL ) { 109 if ((map = mmap_read(g_accesslist_filename, &maplen)) == NULL) {
112 char *wd = getcwd( NULL, 0 ); 110 char *wd = getcwd(NULL, 0);
113 fprintf( stderr, "Warning: Can't open accesslist file: %s (but will try to create it later, if necessary and possible).\nPWD: %s\n", g_accesslist_filename, wd ); 111 fprintf(stderr, "Warning: Can't open accesslist file: %s (but will try to create it later, if necessary and possible).\nPWD: %s\n", g_accesslist_filename, wd);
114 free( wd ); 112 free(wd);
115 return; 113 return;
116 } 114 }
117 115
118 /* You need at least 41 bytes to pass an info_hash, make enough room 116 /* You need at least 41 bytes to pass an info_hash, make enough room
119 for the maximum amount of them */ 117 for the maximum amount of them */
120 accesslist_new = accesslist_make(g_accesslist, maplen / 41); 118 accesslist_new = accesslist_make(g_accesslist, maplen / 41);
121 if( !accesslist_new ) { 119 if (!accesslist_new) {
122 fprintf( stderr, "Warning: Not enough memory to allocate %zd bytes for accesslist buffer. May succeed later.\n", ( maplen / 41 ) * 20 ); 120 fprintf(stderr, "Warning: Not enough memory to allocate %zd bytes for accesslist buffer. May succeed later.\n", (maplen / 41) * 20);
123 mmap_unmap( map, maplen); 121 mmap_unmap(map, maplen);
124 return; 122 return;
125 } 123 }
126 info_hash = accesslist_new->list; 124 info_hash = accesslist_new->list;
127 125
128 /* No use to scan if there's not enough room for another full info_hash */ 126 /* No use to scan if there's not enough room for another full info_hash */
129 map_end = map + maplen - 40; 127 map_end = map + maplen - 40;
130 read_offs = map; 128 read_offs = map;
131 129
132 /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*" */ 130 /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*" */
133 while( read_offs <= map_end ) { 131 while (read_offs <= map_end) {
134 int i; 132 int i;
135 for( i=0; i<(int)sizeof(ot_hash); ++i ) { 133 for (i = 0; i < (int)sizeof(ot_hash); ++i) {
136 int eger1 = scan_fromhex( (unsigned char)read_offs[ 2*i ] ); 134 int eger1 = scan_fromhex((unsigned char)read_offs[2 * i]);
137 int eger2 = scan_fromhex( (unsigned char)read_offs[ 1 + 2*i ] ); 135 int eger2 = scan_fromhex((unsigned char)read_offs[1 + 2 * i]);
138 if( eger1 < 0 || eger2 < 0 ) 136 if (eger1 < 0 || eger2 < 0)
139 break; 137 break;
140 (*info_hash)[i] = (uint8_t)(eger1 * 16 + eger2); 138 (*info_hash)[i] = (uint8_t)(eger1 * 16 + eger2);
141 } 139 }
142 140
143 if( i == sizeof(ot_hash) ) { 141 if (i == sizeof(ot_hash)) {
144 read_offs += 40; 142 read_offs += 40;
145 143
146 /* Append accesslist to accesslist vector */ 144 /* Append accesslist to accesslist vector */
147 if( read_offs == map_end || scan_fromhex( (unsigned char)*read_offs ) < 0 ) 145 if (read_offs == map_end || scan_fromhex((unsigned char)*read_offs) < 0)
148 ++info_hash; 146 ++info_hash;
149 } 147 }
150 148
151 /* Find start of next line */ 149 /* Find start of next line */
152 while( read_offs <= map_end && *(read_offs++) != '\n' ); 150 while (read_offs <= map_end && *(read_offs++) != '\n')
151 ;
153 } 152 }
154#ifdef _DEBUG 153#ifdef _DEBUG
155 fprintf( stderr, "Added %zd info_hashes to accesslist\n", (size_t)(info_hash - accesslist_new->list) ); 154 fprintf(stderr, "Added %zd info_hashes to accesslist\n", (size_t)(info_hash - accesslist_new->list));
156#endif 155#endif
157 156
158 mmap_unmap( map, maplen); 157 mmap_unmap(map, maplen);
159 158
160 qsort( accesslist_new->list, info_hash - accesslist_new->list, sizeof( *info_hash ), vector_compare_hash ); 159 qsort(accesslist_new->list, info_hash - accesslist_new->list, sizeof(*info_hash), vector_compare_hash);
161 accesslist_new->size = info_hash - accesslist_new->list; 160 accesslist_new->size = info_hash - accesslist_new->list;
162 161
163 /* Now exchange the accesslist vector in the least race condition prone way */ 162 /* Now exchange the accesslist vector in the least race condition prone way */
164 pthread_mutex_lock(&g_accesslist_mutex); 163 pthread_mutex_lock(&g_accesslist_mutex);
165 accesslist_new->next = g_accesslist; 164 accesslist_new->next = g_accesslist;
166 g_accesslist = accesslist_new; /* Only now set a new list */ 165 g_accesslist = accesslist_new; /* Only now set a new list */
167 166
168#ifdef WANT_DYNAMIC_ACCESSLIST 167#ifdef WANT_DYNAMIC_ACCESSLIST
169 /* If we have dynamic accesslists, reloading a new one will always void the add/delete lists. 168 /* If we have dynamic accesslists, reloading a new one will always void the add/delete lists.
170 Insert empty ones at the list head */ 169 Insert empty ones at the list head */
171 if (g_accesslist_add && (accesslist_new = accesslist_make(g_accesslist_add, 0)) != NULL) 170 if (g_accesslist_add && (accesslist_new = accesslist_make(g_accesslist_add, 0)) != NULL)
172 g_accesslist_add = accesslist_new; 171 g_accesslist_add = accesslist_new;
173 if (g_accesslist_delete && (accesslist_new = accesslist_make(g_accesslist_delete, 0)) != NULL) 172 if (g_accesslist_delete && (accesslist_new = accesslist_make(g_accesslist_delete, 0)) != NULL)
174 g_accesslist_delete = accesslist_new; 173 g_accesslist_delete = accesslist_new;
175#endif 174#endif
176 175
177 accesslist_clean(g_accesslist); 176 accesslist_clean(g_accesslist);
@@ -179,26 +178,26 @@ static void accesslist_readfile( void ) {
179 pthread_mutex_unlock(&g_accesslist_mutex); 178 pthread_mutex_unlock(&g_accesslist_mutex);
180} 179}
181 180
182int accesslist_hashisvalid( ot_hash hash ) { 181int accesslist_hashisvalid(ot_hash hash) {
183 /* Get working copy of current access list */ 182 /* Get working copy of current access list */
184 ot_accesslist * accesslist = g_accesslist; 183 ot_accesslist *accesslist = g_accesslist;
185#ifdef WANT_DYNAMIC_ACCESSLIST 184#ifdef WANT_DYNAMIC_ACCESSLIST
186 ot_accesslist * accesslist_add, * accesslist_delete; 185 ot_accesslist *accesslist_add, *accesslist_delete;
187#endif 186#endif
188 void * exactmatch = NULL; 187 void *exactmatch = NULL;
189 188
190 if (accesslist) 189 if (accesslist)
191 exactmatch = bsearch( hash, accesslist->list, accesslist->size, OT_HASH_COMPARE_SIZE, vector_compare_hash ); 190 exactmatch = bsearch(hash, accesslist->list, accesslist->size, OT_HASH_COMPARE_SIZE, vector_compare_hash);
192 191
193#ifdef WANT_DYNAMIC_ACCESSLIST 192#ifdef WANT_DYNAMIC_ACCESSLIST
194 /* If we had no match on the main list, scan the list of dynamically added hashes */ 193 /* If we had no match on the main list, scan the list of dynamically added hashes */
195 accesslist_add = g_accesslist_add; 194 accesslist_add = g_accesslist_add;
196 if ((exactmatch == NULL) && accesslist_add) 195 if ((exactmatch == NULL) && accesslist_add)
197 exactmatch = bsearch( hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash ); 196 exactmatch = bsearch(hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash);
198 197
199 /* If we found a matching hash on the main list, scan the list of dynamically deleted hashes */ 198 /* If we found a matching hash on the main list, scan the list of dynamically deleted hashes */
200 accesslist_delete = g_accesslist_delete; 199 accesslist_delete = g_accesslist_delete;
 201 if ((exactmatch != NULL) && accesslist_delete && bsearch( hash, accesslist_delete->list, accesslist_delete->size, OT_HASH_COMPARE_SIZE, vector_compare_hash )) 200 if ((exactmatch != NULL) && accesslist_delete && bsearch(hash, accesslist_delete->list, accesslist_delete->size, OT_HASH_COMPARE_SIZE, vector_compare_hash))
202 exactmatch = NULL; 201 exactmatch = NULL;
203#endif 202#endif
204 203
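For orientation, here is a minimal standalone sketch of the layered lookup in this hunk: a hash counts as listed when it is in the main list or in the dynamically added list, and not in the dynamically deleted list. The types and helper names below are simplified stand-ins, not opentracker's own, and whether "listed" means allowed or blocked depends on the WANT_ACCESSLIST_WHITE / WANT_ACCESSLIST_BLACK build flag.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef uint8_t hash20[20];

typedef struct {
  hash20 *list; /* sorted ascending by memcmp() */
  size_t  size;
} hashlist;

static int cmp_hash(const void *a, const void *b) { return memcmp(a, b, sizeof(hash20)); }

static int in_list(const hashlist *hl, const hash20 h) {
  return hl && hl->list && bsearch(h, hl->list, hl->size, sizeof(hash20), cmp_hash) != NULL;
}

/* Roughly mirrors accesslist_hashisvalid() with WANT_DYNAMIC_ACCESSLIST enabled */
static int hash_is_listed(const hashlist *main_list, const hashlist *added, const hashlist *deleted, const hash20 h) {
  int hit = in_list(main_list, h) || in_list(added, h);
  if (hit && in_list(deleted, h))
    hit = 0;
  return hit;
}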
@@ -209,31 +208,32 @@ int accesslist_hashisvalid( ot_hash hash ) {
209#endif 208#endif
210} 209}
211 210
212static void * accesslist_worker( void * args ) { 211static void *accesslist_worker(void *args) {
213 int sig; 212 int sig;
214 sigset_t signal_mask; 213 sigset_t signal_mask;
215 214
216 sigemptyset(&signal_mask); 215 sigemptyset(&signal_mask);
217 sigaddset(&signal_mask, SIGHUP); 216 sigaddset(&signal_mask, SIGHUP);
218 217
219 (void)args; 218 (void)args;
220 219
221 while( 1 ) { 220 while (1) {
222 if (!g_opentracker_running) 221 if (!g_opentracker_running)
223 return NULL; 222 return NULL;
224 223
225 /* Initial attempt to read accesslist */ 224 /* Initial attempt to read accesslist */
226 accesslist_readfile( ); 225 accesslist_readfile();
227 226
228 /* Wait for signals */ 227 /* Wait for signals */
229 while( sigwait (&signal_mask, &sig) != 0 && sig != SIGHUP ); 228 while (sigwait(&signal_mask, &sig) != 0 && sig != SIGHUP)
229 ;
230 } 230 }
231 return NULL; 231 return NULL;
232} 232}
233 233
234#ifdef WANT_DYNAMIC_ACCESSLIST 234#ifdef WANT_DYNAMIC_ACCESSLIST
235static pthread_t thread_adder_id, thread_deleter_id; 235static pthread_t thread_adder_id, thread_deleter_id;
236static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic * adding_to, ot_accesslist * _Atomic * removing_from) { 236static void *accesslist_adddel_worker(char *fifoname, ot_accesslist *_Atomic *adding_to, ot_accesslist *_Atomic *removing_from) {
237 struct stat st; 237 struct stat st;
238 238
239 if (!stat(fifoname, &st)) { 239 if (!stat(fifoname, &st)) {
@@ -250,9 +250,9 @@ static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic
250 } 250 }
251 251
252 while (g_opentracker_running) { 252 while (g_opentracker_running) {
253 FILE * fifo = fopen(fifoname, "r"); 253 FILE *fifo = fopen(fifoname, "r");
254 char *line = NULL; 254 char *line = NULL;
255 size_t linecap = 0; 255 size_t linecap = 0;
256 ssize_t linelen; 256 ssize_t linelen;
257 257
258 if (!fifo) { 258 if (!fifo) {
@@ -262,7 +262,7 @@ static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic
262 262
263 while ((linelen = getline(&line, &linecap, fifo)) > 0) { 263 while ((linelen = getline(&line, &linecap, fifo)) > 0) {
264 ot_hash info_hash; 264 ot_hash info_hash;
265 int i; 265 int i;
266 266
267 printf("Got line %*s", (int)linelen, line); 267 printf("Got line %*s", (int)linelen, line);
268 /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*" 268 /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*"
@@ -270,15 +270,15 @@ static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic
270 if (linelen < 41) 270 if (linelen < 41)
271 continue; 271 continue;
272 272
273 for( i=0; i<(int)sizeof(ot_hash); ++i ) { 273 for (i = 0; i < (int)sizeof(ot_hash); ++i) {
274 int eger1 = scan_fromhex( (unsigned char)line[ 2*i ] ); 274 int eger1 = scan_fromhex((unsigned char)line[2 * i]);
275 int eger2 = scan_fromhex( (unsigned char)line[ 1 + 2*i ] ); 275 int eger2 = scan_fromhex((unsigned char)line[1 + 2 * i]);
276 if( eger1 < 0 || eger2 < 0 ) 276 if (eger1 < 0 || eger2 < 0)
277 break; 277 break;
278 ((uint8_t*)info_hash)[i] = (uint8_t)(eger1 * 16 + eger2); 278 ((uint8_t *)info_hash)[i] = (uint8_t)(eger1 * 16 + eger2);
279 } 279 }
280printf("parsed info_hash %.20s\n", info_hash); 280 printf("parsed info_hash %.20s\n", info_hash);
281 if( i != sizeof(ot_hash) ) 281 if (i != sizeof(ot_hash))
282 continue; 282 continue;
283 283
284 /* From now on we modify g_accesslist_add and g_accesslist_delete, so prevent the 284 /* From now on we modify g_accesslist_add and g_accesslist_delete, so prevent the
@@ -287,10 +287,10 @@ printf("parsed info_hash %20s\n", info_hash);
287 287
288 /* If the info hash is in the removing_from list, create a new head without that entry */ 288 /* If the info hash is in the removing_from list, create a new head without that entry */
289 if (*removing_from && (*removing_from)->list) { 289 if (*removing_from && (*removing_from)->list) {
290 ot_hash * exactmatch = bsearch( info_hash, (*removing_from)->list, (*removing_from)->size, OT_HASH_COMPARE_SIZE, vector_compare_hash ); 290 ot_hash *exactmatch = bsearch(info_hash, (*removing_from)->list, (*removing_from)->size, OT_HASH_COMPARE_SIZE, vector_compare_hash);
291 if (exactmatch) { 291 if (exactmatch) {
292 ptrdiff_t off = exactmatch - (*removing_from)->list; 292 ptrdiff_t off = exactmatch - (*removing_from)->list;
293 ot_accesslist * accesslist_new = accesslist_make(*removing_from, (*removing_from)->size - 1); 293 ot_accesslist *accesslist_new = accesslist_make(*removing_from, (*removing_from)->size - 1);
294 if (accesslist_new) { 294 if (accesslist_new) {
295 memcpy(accesslist_new->list, (*removing_from)->list, sizeof(ot_hash) * off); 295 memcpy(accesslist_new->list, (*removing_from)->list, sizeof(ot_hash) * off);
296 memcpy(accesslist_new->list + off, (*removing_from)->list + off + 1, (*removing_from)->size - off - 1); 296 memcpy(accesslist_new->list + off, (*removing_from)->list + off + 1, (*removing_from)->size - off - 1);
@@ -301,19 +301,19 @@ printf("parsed info_hash %20s\n", info_hash);
301 301
302 /* Simple case: there's no adding_to list yet, create one with one member */ 302 /* Simple case: there's no adding_to list yet, create one with one member */
303 if (!*adding_to) { 303 if (!*adding_to) {
304 ot_accesslist * accesslist_new = accesslist_make(NULL, 1); 304 ot_accesslist *accesslist_new = accesslist_make(NULL, 1);
305 if (accesslist_new) { 305 if (accesslist_new) {
306 memcpy(accesslist_new->list, info_hash, sizeof(ot_hash)); 306 memcpy(accesslist_new->list, info_hash, sizeof(ot_hash));
307 *adding_to = accesslist_new; 307 *adding_to = accesslist_new;
308 } 308 }
309 } else { 309 } else {
310 int exactmatch = 0; 310 int exactmatch = 0;
311 ot_hash * insert_point = binary_search( info_hash, (*adding_to)->list, (*adding_to)->size, OT_HASH_COMPARE_SIZE, sizeof(ot_hash), &exactmatch ); 311 ot_hash *insert_point = binary_search(info_hash, (*adding_to)->list, (*adding_to)->size, OT_HASH_COMPARE_SIZE, sizeof(ot_hash), &exactmatch);
312 312
313 /* Only if the info hash is not in the adding_to list, create a new head with that entry */ 313 /* Only if the info hash is not in the adding_to list, create a new head with that entry */
314 if (!exactmatch) { 314 if (!exactmatch) {
315 ot_accesslist * accesslist_new = accesslist_make(*adding_to, (*adding_to)->size + 1); 315 ot_accesslist *accesslist_new = accesslist_make(*adding_to, (*adding_to)->size + 1);
316 ptrdiff_t off = insert_point - (*adding_to)->list; 316 ptrdiff_t off = insert_point - (*adding_to)->list;
317 if (accesslist_new) { 317 if (accesslist_new) {
318 memcpy(accesslist_new->list, (*adding_to)->list, sizeof(ot_hash) * off); 318 memcpy(accesslist_new->list, (*adding_to)->list, sizeof(ot_hash) * off);
319 memcpy(accesslist_new->list + off, info_hash, sizeof(info_hash)); 319 memcpy(accesslist_new->list + off, info_hash, sizeof(info_hash));
@@ -331,29 +331,29 @@ printf("parsed info_hash %20s\n", info_hash);
331 return NULL; 331 return NULL;
332} 332}
333 333
334static void * accesslist_adder_worker( void * args ) { 334static void *accesslist_adder_worker(void *args) {
335 (void)args; 335 (void)args;
336 return accesslist_adddel_worker(g_accesslist_pipe_add, &g_accesslist_add, &g_accesslist_delete); 336 return accesslist_adddel_worker(g_accesslist_pipe_add, &g_accesslist_add, &g_accesslist_delete);
337} 337}
338static void * accesslist_deleter_worker( void * args ) { 338static void *accesslist_deleter_worker(void *args) {
339 (void)args; 339 (void)args;
340 return accesslist_adddel_worker(g_accesslist_pipe_delete, &g_accesslist_delete, &g_accesslist_add); 340 return accesslist_adddel_worker(g_accesslist_pipe_delete, &g_accesslist_delete, &g_accesslist_add);
341} 341}
342#endif 342#endif
343 343
344static pthread_t thread_id; 344static pthread_t thread_id;
345void accesslist_init( ) { 345void accesslist_init() {
346 pthread_mutex_init(&g_accesslist_mutex, NULL); 346 pthread_mutex_init(&g_accesslist_mutex, NULL);
347 pthread_create( &thread_id, NULL, accesslist_worker, NULL ); 347 pthread_create(&thread_id, NULL, accesslist_worker, NULL);
348#ifdef WANT_DYNAMIC_ACCESSLIST 348#ifdef WANT_DYNAMIC_ACCESSLIST
349 if (g_accesslist_pipe_add) 349 if (g_accesslist_pipe_add)
350 pthread_create( &thread_adder_id, NULL, accesslist_adder_worker, NULL ); 350 pthread_create(&thread_adder_id, NULL, accesslist_adder_worker, NULL);
351 if (g_accesslist_pipe_delete) 351 if (g_accesslist_pipe_delete)
352 pthread_create( &thread_deleter_id, NULL, accesslist_deleter_worker, NULL ); 352 pthread_create(&thread_deleter_id, NULL, accesslist_deleter_worker, NULL);
353#endif 353#endif
354} 354}
355 355
356void accesslist_deinit( void ) { 356void accesslist_deinit(void) {
357 /* Wake up sleeping worker */ 357 /* Wake up sleeping worker */
358 pthread_kill(thread_id, SIGHUP); 358 pthread_kill(thread_id, SIGHUP);
359 359
@@ -362,16 +362,16 @@ void accesslist_deinit( void ) {
362 g_accesslist = accesslist_free(g_accesslist); 362 g_accesslist = accesslist_free(g_accesslist);
363 363
364#ifdef WANT_DYNAMIC_ACCESSLIST 364#ifdef WANT_DYNAMIC_ACCESSLIST
365 g_accesslist_add = accesslist_free(g_accesslist_add); 365 g_accesslist_add = accesslist_free(g_accesslist_add);
366 g_accesslist_delete = accesslist_free(g_accesslist_delete); 366 g_accesslist_delete = accesslist_free(g_accesslist_delete);
367#endif 367#endif
368 368
369 pthread_mutex_unlock(&g_accesslist_mutex); 369 pthread_mutex_unlock(&g_accesslist_mutex);
370 pthread_cancel( thread_id ); 370 pthread_cancel(thread_id);
371 pthread_mutex_destroy(&g_accesslist_mutex); 371 pthread_mutex_destroy(&g_accesslist_mutex);
372} 372}
373 373
374void accesslist_cleanup( void ) { 374void accesslist_cleanup(void) {
375 pthread_mutex_lock(&g_accesslist_mutex); 375 pthread_mutex_lock(&g_accesslist_mutex);
376 376
377 accesslist_clean(g_accesslist); 377 accesslist_clean(g_accesslist);
@@ -384,35 +384,34 @@ void accesslist_cleanup( void ) {
384} 384}
385#endif 385#endif
386 386
387int address_in_net( const ot_ip6 address, const ot_net *net ) { 387int address_in_net(const ot_ip6 address, const ot_net *net) {
388 int bits = net->bits, checkbits = ( 0x7f00 >> ( bits & 7 )); 388 int bits = net->bits, checkbits = (0x7f00 >> (bits & 7));
389 int result = memcmp( address, &net->address, bits >> 3 ); 389 int result = memcmp(address, &net->address, bits >> 3);
390 if( !result && ( bits & 7 ) ) 390 if (!result && (bits & 7))
391 result = ( checkbits & address[bits>>3] ) - ( checkbits & net->address[bits>>3]); 391 result = (checkbits & address[bits >> 3]) - (checkbits & net->address[bits >> 3]);
392 return result == 0; 392 return result == 0;
393} 393}
394 394
395void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value, const size_t member_size ) { 395void *set_value_for_net(const ot_net *net, ot_vector *vector, const void *value, const size_t member_size) {
396 size_t i; 396 size_t i;
397 int exactmatch; 397 int exactmatch;
398 398
 399 /* Caller must have a concept of ot_net in its member */ 399 /* Caller must have a concept of ot_net in its member */
400 if( member_size < sizeof(ot_net) ) 400 if (member_size < sizeof(ot_net))
401 return 0; 401 return 0;
402 402
403 /* Check each net in vector for overlap */ 403 /* Check each net in vector for overlap */
404 uint8_t *member = ((uint8_t*)vector->data); 404 uint8_t *member = ((uint8_t *)vector->data);
405 for( i=0; i<vector->size; ++i ) { 405 for (i = 0; i < vector->size; ++i) {
406 if( address_in_net( *(ot_ip6*)member, net ) || 406 if (address_in_net(*(ot_ip6 *)member, net) || address_in_net(net->address, (ot_net *)member))
407 address_in_net( net->address, (ot_net*)member ) )
408 return 0; 407 return 0;
409 member += member_size; 408 member += member_size;
410 } 409 }
411 410
412 member = vector_find_or_insert( vector, (void*)net, member_size, sizeof(ot_net), &exactmatch ); 411 member = vector_find_or_insert(vector, (void *)net, member_size, sizeof(ot_net), &exactmatch);
413 if( member ) { 412 if (member) {
414 memcpy( member, net, sizeof(ot_net)); 413 memcpy(member, net, sizeof(ot_net));
415 memcpy( member + sizeof(ot_net), value, member_size - sizeof(ot_net)); 414 memcpy(member + sizeof(ot_net), value, member_size - sizeof(ot_net));
416 } 415 }
417 416
418 return member; 417 return member;
@@ -420,43 +419,43 @@ void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value
420 419
421/* Takes a vector filled with { ot_net net, uint8_t[x] value }; 420/* Takes a vector filled with { ot_net net, uint8_t[x] value };
422 Returns value associated with the net, or NULL if not found */ 421 Returns value associated with the net, or NULL if not found */
423void *get_value_for_net( const ot_ip6 address, const ot_vector *vector, const size_t member_size ) { 422void *get_value_for_net(const ot_ip6 address, const ot_vector *vector, const size_t member_size) {
424 int exactmatch; 423 int exactmatch;
425 /* This binary search will return a pointer to the first non-containing network... */ 424 /* This binary search will return a pointer to the first non-containing network... */
426 ot_net *net = binary_search( address, vector->data, vector->size, member_size, sizeof(ot_ip6), &exactmatch ); 425 ot_net *net = binary_search(address, vector->data, vector->size, member_size, sizeof(ot_ip6), &exactmatch);
427 if( !net ) 426 if (!net)
428 return NULL; 427 return NULL;
429 /* ... so we'll need to move back one step unless we've exactly hit the first address in network */ 428 /* ... so we'll need to move back one step unless we've exactly hit the first address in network */
430 if( !exactmatch && ( (void*)net > vector->data ) ) 429 if (!exactmatch && ((void *)net > vector->data))
431 --net; 430 --net;
432 if( !address_in_net( address, net ) ) 431 if (!address_in_net(address, net))
433 return NULL; 432 return NULL;
434 return (void*)net; 433 return (void *)net;
435} 434}
436 435
437#ifdef WANT_FULLLOG_NETWORKS 436#ifdef WANT_FULLLOG_NETWORKS
438static ot_vector g_lognets_list; 437static ot_vector g_lognets_list;
439ot_log *g_logchain_first, *g_logchain_last; 438ot_log *g_logchain_first, *g_logchain_last;
440
441static pthread_mutex_t g_lognets_list_mutex = PTHREAD_MUTEX_INITIALIZER; 439static pthread_mutex_t g_lognets_list_mutex = PTHREAD_MUTEX_INITIALIZER;
442void loglist_add_network( const ot_net *net ) { 440
441void loglist_add_network(const ot_net *net) {
443 pthread_mutex_lock(&g_lognets_list_mutex); 442 pthread_mutex_lock(&g_lognets_list_mutex);
444 set_value_for_net( net, &g_lognets_list, NULL, sizeof(ot_net)); 443 set_value_for_net(net, &g_lognets_list, NULL, sizeof(ot_net));
445 pthread_mutex_unlock(&g_lognets_list_mutex); 444 pthread_mutex_unlock(&g_lognets_list_mutex);
446} 445}
447 446
448void loglist_reset( ) { 447void loglist_reset() {
449 pthread_mutex_lock(&g_lognets_list_mutex); 448 pthread_mutex_lock(&g_lognets_list_mutex);
450 free( g_lognets_list.data ); 449 free(g_lognets_list.data);
451 g_lognets_list.data = 0; 450 g_lognets_list.data = 0;
452 g_lognets_list.size = g_lognets_list.space = 0; 451 g_lognets_list.size = g_lognets_list.space = 0;
453 pthread_mutex_unlock(&g_lognets_list_mutex); 452 pthread_mutex_unlock(&g_lognets_list_mutex);
454} 453}
455 454
456int loglist_check_address( const ot_ip6 address ) { 455int loglist_check_address(const ot_ip6 address) {
457 int result; 456 int result;
458 pthread_mutex_lock(&g_lognets_list_mutex); 457 pthread_mutex_lock(&g_lognets_list_mutex);
459 result = ( NULL != get_value_for_net( address, &g_lognets_list, sizeof(ot_net)) ); 458 result = (NULL != get_value_for_net(address, &g_lognets_list, sizeof(ot_net)));
460 pthread_mutex_unlock(&g_lognets_list_mutex); 459 pthread_mutex_unlock(&g_lognets_list_mutex);
461 return result; 460 return result;
462} 461}
@@ -464,44 +463,44 @@ int loglist_check_address( const ot_ip6 address ) {
464 463
465#ifdef WANT_IP_FROM_PROXY 464#ifdef WANT_IP_FROM_PROXY
466typedef struct { 465typedef struct {
467 ot_net *proxy; 466 ot_net *proxy;
468 ot_vector networks; 467 ot_vector networks;
469} ot_proxymap; 468} ot_proxymap;
470 469
471static ot_vector g_proxies_list; 470static ot_vector g_proxies_list;
472static pthread_mutex_t g_proxies_list_mutex = PTHREAD_MUTEX_INITIALIZER; 471static pthread_mutex_t g_proxies_list_mutex = PTHREAD_MUTEX_INITIALIZER;
473 472
474int proxylist_add_network( const ot_net *proxy, const ot_net *net ) { 473int proxylist_add_network(const ot_net *proxy, const ot_net *net) {
475 ot_proxymap *map; 474 ot_proxymap *map;
476 int exactmatch, result = 1; 475 int exactmatch, result = 1;
477 pthread_mutex_lock(&g_proxies_list_mutex); 476 pthread_mutex_lock(&g_proxies_list_mutex);
478 477
479 /* If we have a direct hit, use and extend the vector there */ 478 /* If we have a direct hit, use and extend the vector there */
480 map = binary_search( proxy, g_proxies_list.data, g_proxies_list.size, sizeof(ot_proxymap), sizeof(ot_net), &exactmatch ); 479 map = binary_search(proxy, g_proxies_list.data, g_proxies_list.size, sizeof(ot_proxymap), sizeof(ot_net), &exactmatch);
481 480
482 if( !map || !exactmatch ) { 481 if (!map || !exactmatch) {
 483 /* else see if we've got overlapping networks 482 /* else see if we've got overlapping networks
484 and get a new empty vector if not */ 483 and get a new empty vector if not */
485 ot_vector empty; 484 ot_vector empty;
486 memset( &empty, 0, sizeof( ot_vector ) ); 485 memset(&empty, 0, sizeof(ot_vector));
487 map = set_value_for_net( proxy, &g_proxies_list, &empty, sizeof(ot_proxymap)); 486 map = set_value_for_net(proxy, &g_proxies_list, &empty, sizeof(ot_proxymap));
488 } 487 }
489 488
490 if( map && set_value_for_net( net, &map->networks, NULL, sizeof(ot_net) ) ) 489 if (map && set_value_for_net(net, &map->networks, NULL, sizeof(ot_net)))
491 result = 1; 490 result = 1;
492 491
493 pthread_mutex_unlock(&g_proxies_list_mutex); 492 pthread_mutex_unlock(&g_proxies_list_mutex);
494 return result; 493 return result;
495} 494}
496 495
497int proxylist_check_proxy( const ot_ip6 proxy, const ot_ip6 address ) { 496int proxylist_check_proxy(const ot_ip6 proxy, const ot_ip6 address) {
498 int result = 0; 497 int result = 0;
499 ot_proxymap *map; 498 ot_proxymap *map;
500 499
501 pthread_mutex_lock(&g_proxies_list_mutex); 500 pthread_mutex_lock(&g_proxies_list_mutex);
502 501
503 if( ( map = get_value_for_net( proxy, &g_proxies_list, sizeof(ot_proxymap) ) ) ) 502 if ((map = get_value_for_net(proxy, &g_proxies_list, sizeof(ot_proxymap))))
504 if( !address || get_value_for_net( address, &map->networks, sizeof(ot_net) ) ) 503 if (!address || get_value_for_net(address, &map->networks, sizeof(ot_net)))
505 result = 1; 504 result = 1;
506 505
507 pthread_mutex_unlock(&g_proxies_list_mutex); 506 pthread_mutex_unlock(&g_proxies_list_mutex);
@@ -514,44 +513,49 @@ static ot_net g_admin_nets[OT_ADMINIP_MAX];
514static ot_permissions g_admin_nets_permissions[OT_ADMINIP_MAX]; 513static ot_permissions g_admin_nets_permissions[OT_ADMINIP_MAX];
515static unsigned int g_admin_nets_count = 0; 514static unsigned int g_admin_nets_count = 0;
516 515
517int accesslist_bless_net( ot_net *net, ot_permissions permissions ) { 516int accesslist_bless_net(ot_net *net, ot_permissions permissions) {
518 if( g_admin_nets_count >= OT_ADMINIP_MAX ) 517 if (g_admin_nets_count >= OT_ADMINIP_MAX)
519 return -1; 518 return -1;
520 519
521 memcpy(g_admin_nets + g_admin_nets_count, net, sizeof(ot_net)); 520 memcpy(g_admin_nets + g_admin_nets_count, net, sizeof(ot_net));
522 g_admin_nets_permissions[ g_admin_nets_count++ ] = permissions; 521 g_admin_nets_permissions[g_admin_nets_count++] = permissions;
523 522
524#ifdef _DEBUG 523#ifdef _DEBUG
525 { 524 {
526 char _debug[512]; 525 char _debug[512];
527 int off = snprintf( _debug, sizeof(_debug), "Blessing ip net " ); 526 int off = snprintf(_debug, sizeof(_debug), "Blessing ip net ");
528 off += fmt_ip6c(_debug+off, net->address ); 527 off += fmt_ip6c(_debug + off, net->address);
529 if( net->bits < 128) { 528 if (net->bits < 128) {
530 _debug[off++] = '/'; 529 _debug[off++] = '/';
531 if( ip6_isv4mapped(net->address) ) 530 if (ip6_isv4mapped(net->address))
532 off += fmt_long(_debug+off, net->bits-96); 531 off += fmt_long(_debug + off, net->bits - 96);
533 else 532 else
534 off += fmt_long(_debug+off, net->bits); 533 off += fmt_long(_debug + off, net->bits);
535 } 534 }
536 535
537 if( permissions & OT_PERMISSION_MAY_STAT ) off += snprintf( _debug+off, 512-off, " may_fetch_stats" ); 536 if (permissions & OT_PERMISSION_MAY_STAT)
538 if( permissions & OT_PERMISSION_MAY_LIVESYNC ) off += snprintf( _debug+off, 512-off, " may_sync_live" ); 537 off += snprintf(_debug + off, 512 - off, " may_fetch_stats");
539 if( permissions & OT_PERMISSION_MAY_FULLSCRAPE ) off += snprintf( _debug+off, 512-off, " may_fetch_fullscrapes" ); 538 if (permissions & OT_PERMISSION_MAY_LIVESYNC)
540 if( permissions & OT_PERMISSION_MAY_PROXY ) off += snprintf( _debug+off, 512-off, " may_proxy" ); 539 off += snprintf(_debug + off, 512 - off, " may_sync_live");
541 if( !permissions ) off += snprintf( _debug+off, sizeof(_debug)-off, " nothing" ); 540 if (permissions & OT_PERMISSION_MAY_FULLSCRAPE)
541 off += snprintf(_debug + off, 512 - off, " may_fetch_fullscrapes");
542 if (permissions & OT_PERMISSION_MAY_PROXY)
543 off += snprintf(_debug + off, 512 - off, " may_proxy");
544 if (!permissions)
545 off += snprintf(_debug + off, sizeof(_debug) - off, " nothing");
542 _debug[off++] = '.'; 546 _debug[off++] = '.';
543 _debug[off++] = '\n'; 547 _debug[off++] = '\n';
544 (void)write( 2, _debug, off ); 548 (void)write(2, _debug, off);
545 } 549 }
546#endif 550#endif
547 551
548 return 0; 552 return 0;
549} 553}
550 554
551int accesslist_is_blessed( ot_ip6 ip, ot_permissions permissions ) { 555int accesslist_is_blessed(ot_ip6 ip, ot_permissions permissions) {
552 unsigned int i; 556 unsigned int i;
553 for( i=0; i<g_admin_nets_count; ++i ) 557 for (i = 0; i < g_admin_nets_count; ++i)
554 if( address_in_net(ip, g_admin_nets + i) && (g_admin_nets_permissions[ i ] & permissions )) 558 if (address_in_net(ip, g_admin_nets + i) && (g_admin_nets_permissions[i] & permissions))
555 return 1; 559 return 1;
556 return 0; 560 return 0;
557} 561}
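The file reader and the FIFO workers in this file all turn a 40-hex-digit line into a 20-byte ot_hash. Below is a compact standalone version of that parsing, with a local hexval() standing in for libowfat's scan_fromhex(); names and the exact acceptance of the 41st character are illustrative, following the "^[:xdigit:]{40}[^:xdigit:].*" rule quoted in the code comments.

#include <stddef.h>
#include <stdint.h>

static int hexval(unsigned char c) {
  if (c >= '0' && c <= '9') return c - '0';
  if (c >= 'a' && c <= 'f') return c - 'a' + 10;
  if (c >= 'A' && c <= 'F') return c - 'A' + 10;
  return -1; /* not a hex digit */
}

/* Returns 1 and fills hash[20] if the line starts with exactly 40 hex digits
   followed by a non-hex character, 0 otherwise. */
static int parse_info_hash(const char *line, size_t len, uint8_t hash[20]) {
  size_t i;
  if (len < 41) /* 40 digits plus at least a newline */
    return 0;
  for (i = 0; i < 20; ++i) {
    int hi = hexval((unsigned char)line[2 * i]);
    int lo = hexval((unsigned char)line[2 * i + 1]);
    if (hi < 0 || lo < 0)
      return 0;
    hash[i] = (uint8_t)(hi * 16 + lo);
  }
  return hexval((unsigned char)line[40]) < 0; /* 41st char must not be hex */
}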
diff --git a/ot_accesslist.h b/ot_accesslist.h
index a988791..0a7488e 100644
--- a/ot_accesslist.h
+++ b/ot_accesslist.h
@@ -6,16 +6,18 @@
6#ifndef OT_ACCESSLIST_H__ 6#ifndef OT_ACCESSLIST_H__
7#define OT_ACCESSLIST_H__ 7#define OT_ACCESSLIST_H__
8 8
9#if defined ( WANT_ACCESSLIST_BLACK ) && defined ( WANT_ACCESSLIST_WHITE ) 9#include "trackerlogic.h"
10# error WANT_ACCESSLIST_BLACK and WANT_ACCESSLIST_WHITE are exclusive. 10
11#if defined(WANT_ACCESSLIST_BLACK) && defined(WANT_ACCESSLIST_WHITE)
12#error WANT_ACCESSLIST_BLACK and WANT_ACCESSLIST_WHITE are exclusive.
11#endif 13#endif
12 14
13#if defined ( WANT_ACCESSLIST_BLACK ) || defined (WANT_ACCESSLIST_WHITE ) 15#if defined(WANT_ACCESSLIST_BLACK) || defined(WANT_ACCESSLIST_WHITE)
14#define WANT_ACCESSLIST 16#define WANT_ACCESSLIST
15void accesslist_init( void ); 17void accesslist_init(void);
16void accesslist_deinit( void ); 18void accesslist_deinit(void);
17int accesslist_hashisvalid( ot_hash hash ); 19int accesslist_hashisvalid(ot_hash hash);
18void accesslist_cleanup( void ); 20void accesslist_cleanup(void);
19 21
20extern char *g_accesslist_filename; 22extern char *g_accesslist_filename;
21#ifdef WANT_DYNAMIC_ACCESSLIST 23#ifdef WANT_DYNAMIC_ACCESSLIST
@@ -25,16 +27,16 @@ extern char *g_accesslist_pipe_delete;
25 27
26#else 28#else
27#ifdef WANT_DYNAMIC_ACCESSLIST 29#ifdef WANT_DYNAMIC_ACCESSLIST
28# error WANT_DYNAMIC_ACCESSLIST needs either WANT_ACCESSLIST_BLACK or WANT_ACCESSLIST_WHITE 30#error WANT_DYNAMIC_ACCESSLIST needs either WANT_ACCESSLIST_BLACK or WANT_ACCESSLIST_WHITE
29#endif 31#endif
30 32
31#define accesslist_init( accesslist_filename ) 33#define accesslist_init(accesslist_filename)
32#define accesslist_deinit( ) 34#define accesslist_deinit()
33#define accesslist_hashisvalid( hash ) 1 35#define accesslist_hashisvalid(hash) 1
34#endif 36#endif
35 37
36/* Test if an address is subset of an ot_net, return value is considered a bool */ 38/* Test if an address is subset of an ot_net, return value is considered a bool */
37int address_in_net( const ot_ip6 address, const ot_net *net ); 39int address_in_net(const ot_ip6 address, const ot_net *net);
38 40
39/* Store a value into a vector of struct { ot_net net, uint8_t[x] value } member; 41/* Store a value into a vector of struct { ot_net net, uint8_t[x] value } member;
40 returns NULL 42 returns NULL
@@ -45,18 +47,17 @@ int address_in_net( const ot_ip6 address, const ot_net *net );
45 returns pointer to new member in vector for success 47 returns pointer to new member in vector for success
46 member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping 48 member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping
47*/ 49*/
48void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value, const size_t member_size ); 50void *set_value_for_net(const ot_net *net, ot_vector *vector, const void *value, const size_t member_size);
49 51
50/* Takes a vector filled with struct { ot_net net, uint8_t[x] value } member; 52/* Takes a vector filled with struct { ot_net net, uint8_t[x] value } member;
51 Returns pointer to _member_ associated with the net, or NULL if not found 53 Returns pointer to _member_ associated with the net, or NULL if not found
52 member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping 54 member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping
53*/ 55*/
54void *get_value_for_net( const ot_ip6 address, const ot_vector *vector, const size_t member_size ); 56void *get_value_for_net(const ot_ip6 address, const ot_vector *vector, const size_t member_size);
55
56 57
57#ifdef WANT_IP_FROM_PROXY 58#ifdef WANT_IP_FROM_PROXY
58int proxylist_add_network( const ot_net *proxy, const ot_net *net ); 59int proxylist_add_network(const ot_net *proxy, const ot_net *net);
59int proxylist_check_network( const ot_ip6 *proxy, const ot_ip6 address /* can be NULL to only check proxy */ ); 60int proxylist_check_network(const ot_ip6 *proxy, const ot_ip6 address /* can be NULL to only check proxy */);
60#endif 61#endif
61 62
62#ifdef WANT_FULLLOG_NETWORKS 63#ifdef WANT_FULLLOG_NETWORKS
@@ -70,10 +71,10 @@ struct ot_log {
70}; 71};
71extern ot_log *g_logchain_first, *g_logchain_last; 72extern ot_log *g_logchain_first, *g_logchain_last;
72 73
73void loglist_add_network( const ot_net *net ); 74void loglist_add_network(const ot_net *net);
74void loglist_reset( ); 75void loglist_reset();
75int loglist_check_address( const ot_ip6 address ); 76int loglist_check_address(const ot_ip6 address);
76#endif 77#endif
77 78
78typedef enum { 79typedef enum {
79 OT_PERMISSION_MAY_FULLSCRAPE = 0x1, 80 OT_PERMISSION_MAY_FULLSCRAPE = 0x1,
@@ -82,7 +83,7 @@ typedef enum {
82 OT_PERMISSION_MAY_PROXY = 0x8 83 OT_PERMISSION_MAY_PROXY = 0x8
83} ot_permissions; 84} ot_permissions;
84 85
85int accesslist_bless_net( ot_net *net, ot_permissions permissions ); 86int accesslist_bless_net(ot_net *net, ot_permissions permissions);
86int accesslist_is_blessed( ot_ip6 ip, ot_permissions permissions ); 87int accesslist_is_blessed(ot_ip6 ip, ot_permissions permissions);
87 88
88#endif 89#endif
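address_in_net() declared above compares the whole bytes of the prefix first and then masks a single partial byte. A simplified, self-contained sketch of the same prefix match follows, using a bare 16-byte address and a bit count instead of ot_net; the mask construction is equivalent to the 0x7f00 >> (bits & 7) trick in ot_accesslist.c.

#include <stdint.h>
#include <string.h>

static int prefix_match(const uint8_t addr[16], const uint8_t net[16], int bits) {
  int whole = bits >> 3; /* full bytes covered by the prefix */
  if (memcmp(addr, net, whole))
    return 0;
  if (bits & 7) { /* partial byte: keep only the top (bits & 7) bits */
    uint8_t mask = (uint8_t)(0xff << (8 - (bits & 7)));
    if ((addr[whole] & mask) != (net[whole] & mask))
      return 0;
  }
  return 1;
}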
diff --git a/ot_clean.c b/ot_clean.c
index 739e785..2506cc1 100644
--- a/ot_clean.c
+++ b/ot_clean.c
@@ -5,80 +5,79 @@
5 5
6/* System */ 6/* System */
7#include <pthread.h> 7#include <pthread.h>
8#include <unistd.h>
9#include <string.h> 8#include <string.h>
9#include <unistd.h>
10 10
11/* Libowfat */ 11/* Libowfat */
12#include "io.h" 12#include "io.h"
13 13
14/* Opentracker */ 14/* Opentracker */
15#include "trackerlogic.h" 15#include "ot_accesslist.h"
16#include "ot_mutex.h"
17#include "ot_vector.h"
18#include "ot_clean.h" 16#include "ot_clean.h"
17#include "ot_mutex.h"
19#include "ot_stats.h" 18#include "ot_stats.h"
20#include "ot_accesslist.h" 19#include "ot_vector.h"
20#include "trackerlogic.h"
21 21
22/* Returns amount of removed peers */ 22/* Returns amount of removed peers */
23static ssize_t clean_single_bucket( ot_peer *peers, size_t peer_count, size_t peer_size, time_t timedout, int *removed_seeders ) { 23static ssize_t clean_single_bucket(ot_peer *peers, size_t peer_count, size_t peer_size, time_t timedout, int *removed_seeders) {
24 ot_peer *last_peer = peers + peer_count * peer_size, *insert_point; 24 ot_peer *last_peer = peers + peer_count * peer_size, *insert_point;
25 25
26 /* Two scan modes: unless there is one peer removed, just increase ot_peertime */ 26 /* Two scan modes: unless there is one peer removed, just increase ot_peertime */
27 while( peers < last_peer ) { 27 while (peers < last_peer) {
28 time_t timediff = timedout + OT_PEERTIME( peers, peer_size ); 28 time_t timediff = timedout + OT_PEERTIME(peers, peer_size);
29 if( timediff >= OT_PEER_TIMEOUT ) 29 if (timediff >= OT_PEER_TIMEOUT)
30 break; 30 break;
31 OT_PEERTIME( peers, peer_size ) = timediff; 31 OT_PEERTIME(peers, peer_size) = timediff;
32 peers += peer_size; 32 peers += peer_size;
33 } 33 }
34 34
35 /* If we at least remove one peer, we have to copy */ 35 /* If we at least remove one peer, we have to copy */
36 for( insert_point = peers; peers < last_peer; peers += peer_size ) { 36 for (insert_point = peers; peers < last_peer; peers += peer_size) {
37 time_t timediff = timedout + OT_PEERTIME( peers, peer_size ); 37 time_t timediff = timedout + OT_PEERTIME(peers, peer_size);
38 38
39 if( timediff < OT_PEER_TIMEOUT ) { 39 if (timediff < OT_PEER_TIMEOUT) {
40 OT_PEERTIME( peers, peer_size ) = timediff; 40 OT_PEERTIME(peers, peer_size) = timediff;
41 memcpy( insert_point, peers, peer_size); 41 memcpy(insert_point, peers, peer_size);
42 insert_point += peer_size; 42 insert_point += peer_size;
43 } else 43 } else if (OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING)
44 if( OT_PEERFLAG_D( peers, peer_size ) & PEER_FLAG_SEEDING ) 44 (*removed_seeders)++;
45 (*removed_seeders)++;
46 } 45 }
47 46
48 return (peers - insert_point) / peer_size; 47 return (peers - insert_point) / peer_size;
49} 48}
50 49
51int clean_single_peer_list( ot_peerlist *peer_list, size_t peer_size ) { 50int clean_single_peer_list(ot_peerlist *peer_list, size_t peer_size) {
52 ot_vector *peer_vector = &peer_list->peers; 51 ot_vector *peer_vector = &peer_list->peers;
53 time_t timedout = (time_t)( g_now_minutes - peer_list->base ); 52 time_t timedout = (time_t)(g_now_minutes - peer_list->base);
54 int num_buckets = 1, removed_seeders = 0; 53 int num_buckets = 1, removed_seeders = 0;
55 54
56 /* No need to clean empty torrent */ 55 /* No need to clean empty torrent */
57 if( !timedout ) 56 if (!timedout)
58 return 0; 57 return 0;
59 58
60 /* Torrent has idled out */ 59 /* Torrent has idled out */
61 if( timedout > OT_TORRENT_TIMEOUT ) 60 if (timedout > OT_TORRENT_TIMEOUT)
62 return 1; 61 return 1;
63 62
64 /* Nothing to be cleaned here? Test if torrent is worth keeping */ 63 /* Nothing to be cleaned here? Test if torrent is worth keeping */
65 if( timedout > OT_PEER_TIMEOUT ) { 64 if (timedout > OT_PEER_TIMEOUT) {
66 if( !peer_list->peer_count ) 65 if (!peer_list->peer_count)
67 return peer_list->down_count ? 0 : 1; 66 return peer_list->down_count ? 0 : 1;
68 timedout = OT_PEER_TIMEOUT; 67 timedout = OT_PEER_TIMEOUT;
69 } 68 }
70 69
71 if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { 70 if (OT_PEERLIST_HASBUCKETS(peer_list)) {
72 num_buckets = peer_vector->size; 71 num_buckets = peer_vector->size;
73 peer_vector = (ot_vector *)peer_vector->data; 72 peer_vector = (ot_vector *)peer_vector->data;
74 } 73 }
75 74
76 while( num_buckets-- ) { 75 while (num_buckets--) {
77 size_t removed_peers = clean_single_bucket( peer_vector->data, peer_vector->size, peer_size, timedout, &removed_seeders ); 76 size_t removed_peers = clean_single_bucket(peer_vector->data, peer_vector->size, peer_size, timedout, &removed_seeders);
78 peer_list->peer_count -= removed_peers; 77 peer_list->peer_count -= removed_peers;
79 peer_vector->size -= removed_peers; 78 peer_vector->size -= removed_peers;
80 if( removed_peers ) 79 if (removed_peers)
81 vector_fixup_peers( peer_vector, peer_size ); 80 vector_fixup_peers(peer_vector, peer_size);
82 81
83 /* Skip to next bucket, a vector containing peers */ 82 /* Skip to next bucket, a vector containing peers */
84 ++peer_vector; 83 ++peer_vector;
@@ -87,10 +86,10 @@ int clean_single_peer_list( ot_peerlist *peer_list, size_t peer_size ) {
87 peer_list->seed_count -= removed_seeders; 86 peer_list->seed_count -= removed_seeders;
88 87
89 /* See if we need to convert a torrent from simple vector to bucket list */ 88 /* See if we need to convert a torrent from simple vector to bucket list */
90 if( ( peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT ) || OT_PEERLIST_HASBUCKETS(peer_list) ) 89 if ((peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT) || OT_PEERLIST_HASBUCKETS(peer_list))
91 vector_redistribute_buckets( peer_list, peer_size ); 90 vector_redistribute_buckets(peer_list, peer_size);
92 91
93 if( peer_list->peer_count ) 92 if (peer_list->peer_count)
94 peer_list->base = g_now_minutes; 93 peer_list->base = g_now_minutes;
95 else { 94 else {
96 /* When we got here, the last time that torrent 95 /* When we got here, the last time that torrent
@@ -103,34 +102,33 @@ int clean_single_peer_list( ot_peerlist *peer_list, size_t peer_size ) {
103/* Clean a single torrent 102/* Clean a single torrent
104 return 1 if torrent timed out 103 return 1 if torrent timed out
105*/ 104*/
106int clean_single_torrent( ot_torrent *torrent ) { 105int clean_single_torrent(ot_torrent *torrent) {
107 return clean_single_peer_list( torrent->peer_list6, OT_PEER_SIZE6) * 106 return clean_single_peer_list(torrent->peer_list6, OT_PEER_SIZE6) * clean_single_peer_list(torrent->peer_list4, OT_PEER_SIZE4);
108 clean_single_peer_list( torrent->peer_list4, OT_PEER_SIZE4);
109} 107}
110 108
111/* Clean up all peers in the current bucket, remove timed-out pools and 109/* Clean up all peers in the current bucket, remove timed-out pools and
112 torrents */ 110 torrents */
113static void * clean_worker( void * args ) { 111static void *clean_worker(void *args) {
114 (void) args; 112 (void)args;
115 while( 1 ) { 113 while (1) {
116 int bucket = OT_BUCKET_COUNT; 114 int bucket = OT_BUCKET_COUNT;
117 while( bucket-- ) { 115 while (bucket--) {
118 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 116 ot_vector *torrents_list = mutex_bucket_lock(bucket);
119 size_t toffs; 117 size_t toffs;
120 int delta_torrentcount = 0; 118 int delta_torrentcount = 0;
121 119
122 for( toffs=0; toffs<torrents_list->size; ++toffs ) { 120 for (toffs = 0; toffs < torrents_list->size; ++toffs) {
123 ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + toffs; 121 ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + toffs;
124 if( clean_single_torrent( torrent ) ) { 122 if (clean_single_torrent(torrent)) {
125 vector_remove_torrent( torrents_list, torrent ); 123 vector_remove_torrent(torrents_list, torrent);
126 --delta_torrentcount; 124 --delta_torrentcount;
127 --toffs; 125 --toffs;
128 } 126 }
129 } 127 }
130 mutex_bucket_unlock( bucket, delta_torrentcount ); 128 mutex_bucket_unlock(bucket, delta_torrentcount);
131 if( !g_opentracker_running ) 129 if (!g_opentracker_running)
132 return NULL; 130 return NULL;
133 usleep( OT_CLEAN_SLEEP ); 131 usleep(OT_CLEAN_SLEEP);
134 } 132 }
135 stats_cleanup(); 133 stats_cleanup();
136#ifdef WANT_ACCESSLIST 134#ifdef WANT_ACCESSLIST
@@ -141,12 +139,8 @@ static void * clean_worker( void * args ) {
141} 139}
142 140
143static pthread_t thread_id; 141static pthread_t thread_id;
144void clean_init( void ) { 142void clean_init(void) { pthread_create(&thread_id, NULL, clean_worker, NULL); }
145 pthread_create( &thread_id, NULL, clean_worker, NULL );
146}
147 143
148void clean_deinit( void ) { 144void clean_deinit(void) { pthread_cancel(thread_id); }
149 pthread_cancel( thread_id );
150}
151 145
152const char *g_version_clean_c = "$Source$: $Revision$\n"; 146const char *g_version_clean_c = "$Source$: $Revision$\n";
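clean_single_bucket() above ages peers in place until it meets the first expired entry, then compacts the survivors in a single pass. A simplified model of that two-phase sweep is sketched below; the peer struct and the timeout constant are illustrative only, not opentracker's OT_PEER_* types.

#include <stddef.h>
#include <time.h>

#define PEER_TIMEOUT 45 /* minutes, stand-in for OT_PEER_TIMEOUT */

typedef struct { int addr; time_t age; } peer;

/* Adds 'elapsed' minutes to every peer's age and removes expired ones.
   Returns the number of peers removed. */
static size_t sweep_peers(peer *peers, size_t *count, time_t elapsed) {
  size_t i = 0, keep, removed;

  /* Phase 1: just age entries while nothing expires */
  while (i < *count && peers[i].age + elapsed < PEER_TIMEOUT)
    peers[i++].age += elapsed;

  /* Phase 2: once one entry expired, copy the survivors down */
  for (keep = i; i < *count; ++i) {
    if (peers[i].age + elapsed < PEER_TIMEOUT) {
      peers[i].age += elapsed;
      peers[keep++] = peers[i];
    }
  }

  removed = *count - keep;
  *count  = keep;
  return removed;
}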
diff --git a/ot_clean.h b/ot_clean.h
index 956770a..e8bcdc1 100644
--- a/ot_clean.h
+++ b/ot_clean.h
@@ -7,13 +7,13 @@
7#define OT_CLEAN_H__ 7#define OT_CLEAN_H__
8 8
9/* The amount of time a clean cycle should take */ 9/* The amount of time a clean cycle should take */
10#define OT_CLEAN_INTERVAL_MINUTES 2 10#define OT_CLEAN_INTERVAL_MINUTES 2
11 11
12/* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */ 12/* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */
13#define OT_CLEAN_SLEEP ( ( ( OT_CLEAN_INTERVAL_MINUTES ) * 60 * 1000000 ) / ( OT_BUCKET_COUNT ) ) 13#define OT_CLEAN_SLEEP (((OT_CLEAN_INTERVAL_MINUTES) * 60 * 1000000) / (OT_BUCKET_COUNT))
14 14
15void clean_init( void ); 15void clean_init(void);
16void clean_deinit( void ); 16void clean_deinit(void);
17int clean_single_torrent( ot_torrent *torrent ); 17int clean_single_torrent(ot_torrent *torrent);
18 18
19#endif 19#endif
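For a feel of the numbers: assuming OT_BUCKET_COUNT is 1024 (it is defined in trackerlogic.h and may differ in a given build), the macro above yields roughly 117 ms of sleep per bucket, so one full clean cycle takes about the two configured minutes.

#include <stdio.h>

#define OT_CLEAN_INTERVAL_MINUTES 2
#define OT_BUCKET_COUNT 1024 /* assumed value for this example */
#define OT_CLEAN_SLEEP (((OT_CLEAN_INTERVAL_MINUTES) * 60 * 1000000) / (OT_BUCKET_COUNT))

int main(void) {
  /* 2 min * 60 s * 1e6 us / 1024 buckets = 117187 us, i.e. ~117 ms per bucket */
  printf("%d us per bucket\n", OT_CLEAN_SLEEP);
  return 0;
}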
diff --git a/ot_fullscrape.c b/ot_fullscrape.c
index 86b9e26..8f8ee9d 100644
--- a/ot_fullscrape.c
+++ b/ot_fullscrape.c
@@ -6,11 +6,11 @@
6#ifdef WANT_FULLSCRAPE 6#ifdef WANT_FULLSCRAPE
7 7
8/* System */ 8/* System */
9#include <sys/param.h> 9#include <arpa/inet.h>
10#include <pthread.h>
10#include <stdio.h> 11#include <stdio.h>
11#include <string.h> 12#include <string.h>
12#include <pthread.h> 13#include <sys/param.h>
13#include <arpa/inet.h>
14#ifdef WANT_COMPRESSION_GZIP 14#ifdef WANT_COMPRESSION_GZIP
15#include <zlib.h> 15#include <zlib.h>
16#endif 16#endif
@@ -21,46 +21,56 @@
21#include "textcode.h" 21#include "textcode.h"
22 22
23/* Opentracker */ 23/* Opentracker */
24#include "trackerlogic.h"
25#include "ot_mutex.h"
26#include "ot_iovec.h"
27#include "ot_fullscrape.h" 24#include "ot_fullscrape.h"
25#include "ot_iovec.h"
26#include "ot_mutex.h"
27#include "trackerlogic.h"
28 28
29/* Fetch full scrape info for all torrents 29/* Fetch full scrape info for all torrents
30 Full scrapes usually are huge and one does not want to 30 Full scrapes usually are huge and one does not want to
 31 allocate more memory. So let's get them in 1 MiB (OT_SCRAPE_CHUNK_SIZE) units 31 allocate more memory. So let's get them in 1 MiB (OT_SCRAPE_CHUNK_SIZE) units
32*/ 32*/
33#define OT_SCRAPE_CHUNK_SIZE (1024*1024) 33#define OT_SCRAPE_CHUNK_SIZE (1024 * 1024)
34 34
35/* "d8:completei%zde10:downloadedi%zde10:incompletei%zdee" */ 35/* "d8:completei%zde10:downloadedi%zde10:incompletei%zdee" */
36#define OT_SCRAPE_MAXENTRYLEN 256 36#define OT_SCRAPE_MAXENTRYLEN 256
37 37
38/* Forward declaration */ 38/* Forward declaration */
39static void fullscrape_make( int taskid, ot_tasktype mode); 39static void fullscrape_make(int taskid, ot_tasktype mode);
40#ifdef WANT_COMPRESSION_GZIP 40#ifdef WANT_COMPRESSION_GZIP
41static void fullscrape_make_gzip( int taskid, ot_tasktype mode); 41static void fullscrape_make_gzip(int taskid, ot_tasktype mode);
42#endif 42#endif
43 43
44/* Converter function from memory to human readable hex strings 44/* Converter function from memory to human readable hex strings
45 XXX - Duplicated from ot_stats. Needs fix. */ 45 XXX - Duplicated from ot_stats. Needs fix. */
46static char*to_hex(char*d,uint8_t*s){char*m="0123456789ABCDEF";char *t=d;char*e=d+40;while(d<e){*d++=m[*s>>4];*d++=m[*s++&15];}*d=0;return t;} 46static char *to_hex(char *d, uint8_t *s) {
47 char *m = "0123456789ABCDEF";
48 char *t = d;
49 char *e = d + 40;
50 while (d < e) {
51 *d++ = m[*s >> 4];
52 *d++ = m[*s++ & 15];
53 }
54 *d = 0;
55 return t;
56}
47 57
48/* This is the entry point into this worker thread 58/* This is the entry point into this worker thread
49 It grabs tasks from mutex_tasklist and delivers results back 59 It grabs tasks from mutex_tasklist and delivers results back
50*/ 60*/
51static void * fullscrape_worker( void * args ) { 61static void *fullscrape_worker(void *args) {
52 (void) args; 62 (void)args;
53 63
54 while( g_opentracker_running ) { 64 while (g_opentracker_running) {
55 ot_tasktype tasktype = TASK_FULLSCRAPE; 65 ot_tasktype tasktype = TASK_FULLSCRAPE;
56 ot_taskid taskid = mutex_workqueue_poptask( &tasktype ); 66 ot_taskid taskid = mutex_workqueue_poptask(&tasktype);
57#ifdef WANT_COMPRESSION_GZIP 67#ifdef WANT_COMPRESSION_GZIP
58 if (tasktype & TASK_FLAG_GZIP) 68 if (tasktype & TASK_FLAG_GZIP)
59 fullscrape_make_gzip( taskid, tasktype ); 69 fullscrape_make_gzip(taskid, tasktype);
60 else 70 else
61#endif 71#endif
62 fullscrape_make( taskid, tasktype ); 72 fullscrape_make(taskid, tasktype);
63 mutex_workqueue_pushchunked( taskid, NULL ); 73 mutex_workqueue_pushchunked(taskid, NULL);
64 } 74 }
65 return NULL; 75 return NULL;
66} 76}
@@ -83,80 +93,87 @@ static char * fullscrape_write_one( ot_tasktype mode, char *r, ot_torrent *torre
83 size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; 93 size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
84 size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count; 94 size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count;
85 95
86 switch( mode & TASK_TASK_MASK ) { 96 switch (mode & TASK_TASK_MASK) {
87 case TASK_FULLSCRAPE: 97 case TASK_FULLSCRAPE:
88 default: 98 default:
89 /* push hash as bencoded string */ 99 /* push hash as bencoded string */
90 *r++='2'; *r++='0'; *r++=':'; 100 *r++ = '2';
91 memcpy( r, hash, sizeof(ot_hash) ); r += sizeof(ot_hash); 101 *r++ = '0';
92 /* push rest of the scrape string */ 102 *r++ = ':';
93 r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", seed_count, down_count, peer_count-seed_count ); 103 memcpy(r, hash, sizeof(ot_hash));
94 104 r += sizeof(ot_hash);
95 break; 105 /* push rest of the scrape string */
96 case TASK_FULLSCRAPE_TPB_ASCII: 106 r += sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", seed_count, down_count, peer_count - seed_count);
97 to_hex( r, *hash ); r+= 2 * sizeof(ot_hash); 107
98 r += sprintf( r, ":%zd:%zd\n", seed_count, peer_count-seed_count ); 108 break;
99 break; 109 case TASK_FULLSCRAPE_TPB_ASCII:
100 case TASK_FULLSCRAPE_TPB_ASCII_PLUS: 110 to_hex(r, *hash);
101 to_hex( r, *hash ); r+= 2 * sizeof(ot_hash); 111 r += 2 * sizeof(ot_hash);
102 r += sprintf( r, ":%zd:%zd:%zd\n", seed_count, peer_count-seed_count, down_count ); 112 r += sprintf(r, ":%zd:%zd\n", seed_count, peer_count - seed_count);
103 break; 113 break;
104 case TASK_FULLSCRAPE_TPB_BINARY: 114 case TASK_FULLSCRAPE_TPB_ASCII_PLUS:
105 memcpy( r, *hash, sizeof(ot_hash) ); r += sizeof(ot_hash); 115 to_hex(r, *hash);
106 *(uint32_t*)(r+0) = htonl( (uint32_t) seed_count ); 116 r += 2 * sizeof(ot_hash);
107 *(uint32_t*)(r+4) = htonl( (uint32_t)( peer_count-seed_count) ); 117 r += sprintf(r, ":%zd:%zd:%zd\n", seed_count, peer_count - seed_count, down_count);
108 r+=8; 118 break;
109 break; 119 case TASK_FULLSCRAPE_TPB_BINARY:
110 case TASK_FULLSCRAPE_TPB_URLENCODED: 120 memcpy(r, *hash, sizeof(ot_hash));
111 r += fmt_urlencoded( r, (char *)*hash, 20 ); 121 r += sizeof(ot_hash);
112 r += sprintf( r, ":%zd:%zd\n", seed_count, peer_count-seed_count ); 122 *(uint32_t *)(r + 0) = htonl((uint32_t)seed_count);
113 break; 123 *(uint32_t *)(r + 4) = htonl((uint32_t)(peer_count - seed_count));
114 case TASK_FULLSCRAPE_TRACKERSTATE: 124 r += 8;
115 to_hex( r, *hash ); r+= 2 * sizeof(ot_hash); 125 break;
116 r += sprintf( r, ":%zd:%zd\n", torrent->peer_list6->base, down_count ); 126 case TASK_FULLSCRAPE_TPB_URLENCODED:
117 break; 127 r += fmt_urlencoded(r, (char *)*hash, 20);
118 } 128 r += sprintf(r, ":%zd:%zd\n", seed_count, peer_count - seed_count);
119 return r; 129 break;
130 case TASK_FULLSCRAPE_TRACKERSTATE:
131 to_hex(r, *hash);
132 r += 2 * sizeof(ot_hash);
133 r += sprintf(r, ":%zd:%zd\n", torrent->peer_list6->base, down_count);
134 break;
135 }
136 return r;
120} 137}
121 138
122static void fullscrape_make( int taskid, ot_tasktype mode ) { 139static void fullscrape_make(int taskid, ot_tasktype mode) {
123 int bucket; 140 int bucket;
124 char *r, *re; 141 char *r, *re;
125 struct iovec iovector = { NULL, 0 }; 142 struct iovec iovector = {NULL, 0};
126 143
127 /* Setup return vector... */ 144 /* Setup return vector... */
128 r = iovector.iov_base = malloc( OT_SCRAPE_CHUNK_SIZE ); 145 r = iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
129 if( !r ) 146 if (!r)
130 return; 147 return;
131 148
132 /* re points to low watermark */ 149 /* re points to low watermark */
133 re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN; 150 re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN;
134 151
135 if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) 152 if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE)
136 r += sprintf( r, "d5:filesd" ); 153 r += sprintf(r, "d5:filesd");
137 154
138 /* For each bucket... */ 155 /* For each bucket... */
139 for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 156 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
140 /* Get exclusive access to that bucket */ 157 /* Get exclusive access to that bucket */
141 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 158 ot_vector *torrents_list = mutex_bucket_lock(bucket);
142 ot_torrent *torrents = (ot_torrent*)(torrents_list->data); 159 ot_torrent *torrents = (ot_torrent *)(torrents_list->data);
143 size_t i; 160 size_t i;
144 161
145 /* For each torrent in this bucket.. */ 162 /* For each torrent in this bucket.. */
146 for( i=0; i<torrents_list->size; ++i ) { 163 for (i = 0; i < torrents_list->size; ++i) {
147 r = fullscrape_write_one( mode, r, torrents+i, &torrents[i].hash ); 164 r = fullscrape_write_one(mode, r, torrents + i, &torrents[i].hash);
148 165
149 if( r > re) { 166 if (r > re) {
150 iovector.iov_len = r - (char *)iovector.iov_base; 167 iovector.iov_len = r - (char *)iovector.iov_base;
151 168
152 if (mutex_workqueue_pushchunked(taskid, &iovector) ) { 169 if (mutex_workqueue_pushchunked(taskid, &iovector)) {
153 free(iovector.iov_base); 170 free(iovector.iov_base);
154 return mutex_bucket_unlock( bucket, 0 ); 171 return mutex_bucket_unlock(bucket, 0);
155 } 172 }
156 /* Allocate a fresh output buffer */ 173 /* Allocate a fresh output buffer */
157 r = iovector.iov_base = malloc( OT_SCRAPE_CHUNK_SIZE ); 174 r = iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
158 if( !r ) 175 if (!r)
159 return mutex_bucket_unlock( bucket, 0 ); 176 return mutex_bucket_unlock(bucket, 0);
160 177
161 /* re points to low watermark */ 178 /* re points to low watermark */
162 re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN; 179 re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN;
@@ -164,132 +181,132 @@ static void fullscrape_make( int taskid, ot_tasktype mode ) {
164 } 181 }
165 182
166 /* All torrents done: release lock on current bucket */ 183 /* All torrents done: release lock on current bucket */
167 mutex_bucket_unlock( bucket, 0 ); 184 mutex_bucket_unlock(bucket, 0);
168 185
169 /* Parent thread died? */ 186 /* Parent thread died? */
170 if( !g_opentracker_running ) 187 if (!g_opentracker_running)
171 return; 188 return;
172 } 189 }
173 190
174 if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) 191 if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE)
175 r += sprintf( r, "ee" ); 192 r += sprintf(r, "ee");
176 193
177 /* Send rest of data */ 194 /* Send rest of data */
178 iovector.iov_len = r - (char *)iovector.iov_base; 195 iovector.iov_len = r - (char *)iovector.iov_base;
179 if( mutex_workqueue_pushchunked(taskid, &iovector) ) 196 if (mutex_workqueue_pushchunked(taskid, &iovector))
180 free(iovector.iov_base); 197 free(iovector.iov_base);
181} 198}
182 199
183#ifdef WANT_COMPRESSION_GZIP 200#ifdef WANT_COMPRESSION_GZIP
184 201
185static void fullscrape_make_gzip( int taskid, ot_tasktype mode) { 202static void fullscrape_make_gzip(int taskid, ot_tasktype mode) {
186 int bucket; 203 int bucket;
187 char *r; 204 char *r;
188 struct iovec iovector = { NULL, 0 }; 205 struct iovec iovector = {NULL, 0};
189 int zres; 206 int zres;
190 z_stream strm; 207 z_stream strm;
191fprintf(stderr, "GZIP path\n"); 208 fprintf(stderr, "GZIP path\n");
192 /* Setup return vector... */ 209 /* Setup return vector... */
193 iovector.iov_base = malloc( OT_SCRAPE_CHUNK_SIZE ); 210 iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
194 if( !iovector.iov_base ) 211 if (!iovector.iov_base)
195 return; 212 return;
196 213
197 byte_zero( &strm, sizeof(strm) ); 214 byte_zero(&strm, sizeof(strm));
198 strm.next_out = (uint8_t*)iovector.iov_base; 215 strm.next_out = (uint8_t *)iovector.iov_base;
199 strm.avail_out = OT_SCRAPE_CHUNK_SIZE; 216 strm.avail_out = OT_SCRAPE_CHUNK_SIZE;
200 if( deflateInit2(&strm,7,Z_DEFLATED,31,9,Z_DEFAULT_STRATEGY) != Z_OK ) 217 if (deflateInit2(&strm, 7, Z_DEFLATED, 31, 9, Z_DEFAULT_STRATEGY) != Z_OK)
201 fprintf( stderr, "not ok.\n" ); 218 fprintf(stderr, "not ok.\n");
202 219
203 if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) { 220 if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) {
204 strm.next_in = (uint8_t*)"d5:filesd"; 221 strm.next_in = (uint8_t *)"d5:filesd";
205 strm.avail_in = strlen("d5:filesd"); 222 strm.avail_in = strlen("d5:filesd");
206 zres = deflate( &strm, Z_NO_FLUSH ); 223 zres = deflate(&strm, Z_NO_FLUSH);
207 } 224 }
208 225
209 /* For each bucket... */ 226 /* For each bucket... */
210 for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 227 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
211 /* Get exclusive access to that bucket */ 228 /* Get exclusive access to that bucket */
212 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 229 ot_vector *torrents_list = mutex_bucket_lock(bucket);
213 ot_torrent *torrents = (ot_torrent*)(torrents_list->data); 230 ot_torrent *torrents = (ot_torrent *)(torrents_list->data);
214 size_t i; 231 size_t i;
215 232
216 /* For each torrent in this bucket.. */ 233 /* For each torrent in this bucket.. */
217 for( i=0; i<torrents_list->size; ++i ) { 234 for (i = 0; i < torrents_list->size; ++i) {
218 char compress_buffer[OT_SCRAPE_MAXENTRYLEN]; 235 char compress_buffer[OT_SCRAPE_MAXENTRYLEN];
219 r = fullscrape_write_one( mode, compress_buffer, torrents+i, &torrents[i].hash ); 236 r = fullscrape_write_one(mode, compress_buffer, torrents + i, &torrents[i].hash);
220 strm.next_in = (uint8_t*)compress_buffer; 237 strm.next_in = (uint8_t *)compress_buffer;
221 strm.avail_in = r - compress_buffer; 238 strm.avail_in = r - compress_buffer;
222 zres = deflate( &strm, Z_NO_FLUSH ); 239 zres = deflate(&strm, Z_NO_FLUSH);
223 if( ( zres < Z_OK ) && ( zres != Z_BUF_ERROR ) ) 240 if ((zres < Z_OK) && (zres != Z_BUF_ERROR))
224 fprintf( stderr, "deflate() failed while in fullscrape_make().\n" ); 241 fprintf(stderr, "deflate() failed while in fullscrape_make().\n");
225 242
226 /* Check if there still is enough buffer left */ 243 /* Check if there still is enough buffer left */
227 while( !strm.avail_out ) { 244 while (!strm.avail_out) {
228 iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base; 245 iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base;
229 246
230 if (mutex_workqueue_pushchunked(taskid, &iovector) ) { 247 if (mutex_workqueue_pushchunked(taskid, &iovector)) {
231 free(iovector.iov_base); 248 free(iovector.iov_base);
232 return mutex_bucket_unlock( bucket, 0 ); 249 return mutex_bucket_unlock(bucket, 0);
233 } 250 }
234 /* Allocate a fresh output buffer */ 251 /* Allocate a fresh output buffer */
235 iovector.iov_base = malloc( OT_SCRAPE_CHUNK_SIZE ); 252 iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
236 if( !iovector.iov_base ) { 253 if (!iovector.iov_base) {
 237 fprintf( stderr, "Out of memory trying to claim output buffer\n" ); 254 fprintf(stderr, "Out of memory trying to claim output buffer\n");
238 deflateEnd(&strm); 255 deflateEnd(&strm);
239 return mutex_bucket_unlock( bucket, 0 ); 256 return mutex_bucket_unlock(bucket, 0);
240 } 257 }
241 strm.next_out = (uint8_t*)iovector.iov_base; 258 strm.next_out = (uint8_t *)iovector.iov_base;
242 strm.avail_out = OT_SCRAPE_CHUNK_SIZE; 259 strm.avail_out = OT_SCRAPE_CHUNK_SIZE;
243 zres = deflate( &strm, Z_NO_FLUSH ); 260 zres = deflate(&strm, Z_NO_FLUSH);
244 if( ( zres < Z_OK ) && ( zres != Z_BUF_ERROR ) ) 261 if ((zres < Z_OK) && (zres != Z_BUF_ERROR))
245 fprintf( stderr, "deflate() failed while in fullscrape_make().\n" ); 262 fprintf(stderr, "deflate() failed while in fullscrape_make().\n");
246 } 263 }
247 } 264 }
248 265
249 /* All torrents done: release lock on current bucket */ 266 /* All torrents done: release lock on current bucket */
250 mutex_bucket_unlock( bucket, 0 ); 267 mutex_bucket_unlock(bucket, 0);
251 268
252 /* Parent thread died? */ 269 /* Parent thread died? */
253 if( !g_opentracker_running ) 270 if (!g_opentracker_running)
254 return; 271 return;
255 } 272 }
256 273
257 if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) { 274 if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) {
258 strm.next_in = (uint8_t*)"ee"; 275 strm.next_in = (uint8_t *)"ee";
259 strm.avail_in = strlen("ee"); 276 strm.avail_in = strlen("ee");
260 } 277 }
261 278
262 if( deflate( &strm, Z_FINISH ) < Z_OK ) 279 if (deflate(&strm, Z_FINISH) < Z_OK)
263 fprintf( stderr, "deflate() failed while in fullscrape_make()'s endgame.\n" ); 280 fprintf(stderr, "deflate() failed while in fullscrape_make()'s endgame.\n");
264 281
265 iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base; 282 iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base;
266 if (mutex_workqueue_pushchunked(taskid, &iovector) ) { 283 if (mutex_workqueue_pushchunked(taskid, &iovector)) {
267 free(iovector.iov_base); 284 free(iovector.iov_base);
268 return mutex_bucket_unlock( bucket, 0 ); 285 return mutex_bucket_unlock(bucket, 0);
269 } 286 }
270 287
271 { 288 {
272 unsigned int pending; 289 unsigned int pending;
273 int bits; 290 int bits;
274 deflatePending( &strm, &pending, &bits); 291 deflatePending(&strm, &pending, &bits);
275 pending += ( bits ? 1 : 0 ); 292 pending += (bits ? 1 : 0);
276 293
277 if (pending) { 294 if (pending) {
278 /* Allocate a fresh output buffer */ 295 /* Allocate a fresh output buffer */
279 iovector.iov_base = malloc( pending ); 296 iovector.iov_base = malloc(pending);
280 iovector.iov_len = pending; 297 iovector.iov_len = pending;
281 298
282 if( !iovector.iov_base ) { 299 if (!iovector.iov_base) {
283 fprintf( stderr, "Problem with iovec_fix_increase_or_free\n" ); 300 fprintf(stderr, "Problem with iovec_fix_increase_or_free\n");
284 deflateEnd(&strm); 301 deflateEnd(&strm);
285 return mutex_bucket_unlock( bucket, 0 ); 302 return mutex_bucket_unlock(bucket, 0);
286 } 303 }
287 strm.next_out = iovector.iov_base; 304 strm.next_out = iovector.iov_base;
288 strm.avail_out = pending; 305 strm.avail_out = pending;
289 if( deflate( &strm, Z_FINISH ) < Z_OK ) 306 if (deflate(&strm, Z_FINISH) < Z_OK)
290 fprintf( stderr, "deflate() failed while in fullscrape_make()'s endgame.\n" ); 307 fprintf(stderr, "deflate() failed while in fullscrape_make()'s endgame.\n");
291 308
292 if( mutex_workqueue_pushchunked(taskid, &iovector) ) 309 if (mutex_workqueue_pushchunked(taskid, &iovector))
293 free(iovector.iov_base); 310 free(iovector.iov_base);
294 } 311 }
295 } 312 }
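For context on the endgame above: once deflate(Z_FINISH) stops because the output buffer is full, deflatePending() (zlib >= 1.2.6) reports how many bytes, plus possibly a partial bit buffer, are still held inside the stream, and a second Z_FINISH pass into a buffer of exactly that size drains it. A minimal standalone sketch of that flush pattern; the gzip parameters, buffer sizes and input string are arbitrary here, and opentracker's bucket/workqueue plumbing is left out.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <zlib.h>

    /* Sketch: compress a small buffer, then drain whatever deflate() still
       holds after the first Z_FINISH pass, mirroring the endgame above. */
    int main(void) {
      unsigned char in[] = "d5:filesd20:....e";   /* placeholder input */
      unsigned char out[64];                      /* deliberately small */
      unsigned int  pending;
      int           bits, zres;
      z_stream      strm;

      memset(&strm, 0, sizeof(strm));
      if (deflateInit2(&strm, 7, Z_DEFLATED, 31, 9, Z_DEFAULT_STRATEGY) != Z_OK)
        return 1;

      strm.next_in   = in;
      strm.avail_in  = sizeof(in) - 1;
      strm.next_out  = out;
      strm.avail_out = sizeof(out);

      zres = deflate(&strm, Z_FINISH);            /* may stop with output full */
      if (zres < Z_OK && zres != Z_BUF_ERROR)
        fprintf(stderr, "deflate() failed\n");

      /* How much is still buffered inside zlib? */
      deflatePending(&strm, &pending, &bits);
      pending += (bits ? 1 : 0);                  /* partial byte counts as one */

      if (pending) {
        unsigned char *tail = malloc(pending);
        if (tail) {
          strm.next_out  = tail;
          strm.avail_out = pending;
          deflate(&strm, Z_FINISH);               /* now drains completely */
          /* ... hand 'tail' to whatever consumes the output ... */
          free(tail);
        }
      }
      deflateEnd(&strm);
      return 0;
    }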
diff --git a/ot_fullscrape.h b/ot_fullscrape.h
index 0f920ec..bbb2a3f 100644
--- a/ot_fullscrape.h
+++ b/ot_fullscrape.h
@@ -8,9 +8,11 @@
8 8
9#ifdef WANT_FULLSCRAPE 9#ifdef WANT_FULLSCRAPE
10 10
11void fullscrape_init( ); 11#include "ot_mutex.h"
12void fullscrape_deinit( ); 12
13void fullscrape_deliver( int64 sock, ot_tasktype tasktype ); 13void fullscrape_init();
14void fullscrape_deinit();
15void fullscrape_deliver(int64 sock, ot_tasktype tasktype);
14 16
15#else 17#else
16 18
diff --git a/ot_http.c b/ot_http.c
index c5d553a..2e2a085 100644
--- a/ot_http.c
+++ b/ot_http.c
@@ -4,195 +4,202 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <sys/types.h>
8#include <arpa/inet.h> 7#include <arpa/inet.h>
9#include <stdlib.h> 8#include <pthread.h>
10#include <stdio.h> 9#include <stdio.h>
10#include <stdlib.h>
11#include <string.h> 11#include <string.h>
12#include <sys/types.h>
12#include <unistd.h> 13#include <unistd.h>
13#include <pthread.h>
14 14
15/* Libowfat */ 15/* Libowfat */
16#include "byte.h"
17#include "array.h" 16#include "array.h"
17#include "byte.h"
18#include "case.h"
18#include "iob.h" 19#include "iob.h"
19#include "ip6.h" 20#include "ip6.h"
20#include "scan.h" 21#include "scan.h"
21#include "case.h"
22 22
23/* Opentracker */ 23/* Opentracker */
24#include "trackerlogic.h" 24#include "ot_accesslist.h"
25#include "ot_mutex.h" 25#include "ot_fullscrape.h"
26#include "ot_http.h" 26#include "ot_http.h"
27#include "ot_iovec.h" 27#include "ot_iovec.h"
28#include "scan_urlencoded_query.h" 28#include "ot_mutex.h"
29#include "ot_fullscrape.h"
30#include "ot_stats.h" 29#include "ot_stats.h"
31#include "ot_accesslist.h" 30#include "scan_urlencoded_query.h"
31#include "trackerlogic.h"
32 32
33#define OT_MAXMULTISCRAPE_COUNT 64 33#define OT_MAXMULTISCRAPE_COUNT 64
34#define OT_BATCH_LIMIT (1024*1024*16) 34#define OT_BATCH_LIMIT (1024 * 1024 * 16)
35extern char *g_redirecturl; 35extern char *g_redirecturl;
36 36
37char *g_stats_path; 37char *g_stats_path;
38ssize_t g_stats_path_len; 38ssize_t g_stats_path_len;
39 39
40enum { 40enum { SUCCESS_HTTP_HEADER_LENGTH = 80, SUCCESS_HTTP_SIZE_OFF = 17 };
41 SUCCESS_HTTP_HEADER_LENGTH = 80,
42 SUCCESS_HTTP_SIZE_OFF = 17 };
43 41
44static void http_senddata( const int64 sock, struct ot_workstruct *ws ) { 42static void http_senddata(const int64 sock, struct ot_workstruct *ws) {
45 struct http_data *cookie = io_getcookie( sock ); 43 struct http_data *cookie = io_getcookie(sock);
46 ssize_t written_size; 44 ssize_t written_size;
47 45
48 if( !cookie ) { io_close(sock); return; } 46 if (!cookie) {
47 io_close(sock);
48 return;
49 }
49 50
50 /* whoever sends data is not interested in its input-array */ 51 /* whoever sends data is not interested in its input-array */
51 if( ws->keep_alive && ws->header_size != ws->request_size ) { 52 if (ws->keep_alive && ws->header_size != ws->request_size) {
52 size_t rest = ws->request_size - ws->header_size; 53 size_t rest = ws->request_size - ws->header_size;
53 if( array_start(&cookie->request) ) { 54 if (array_start(&cookie->request)) {
54 memmove( array_start(&cookie->request), ws->request + ws->header_size, rest ); 55 memmove(array_start(&cookie->request), ws->request + ws->header_size, rest);
55 array_truncate( &cookie->request, 1, rest ); 56 array_truncate(&cookie->request, 1, rest);
56 } else 57 } else
57 array_catb(&cookie->request, ws->request + ws->header_size, rest ); 58 array_catb(&cookie->request, ws->request + ws->header_size, rest);
58 } else 59 } else
59 array_reset( &cookie->request ); 60 array_reset(&cookie->request);
60 61
61 written_size = write( sock, ws->reply, ws->reply_size ); 62 written_size = write(sock, ws->reply, ws->reply_size);
62 if( ( written_size < 0 ) || ( ( written_size == ws->reply_size ) && !ws->keep_alive ) ) { 63 if ((written_size < 0) || ((written_size == ws->reply_size) && !ws->keep_alive)) {
63 array_reset( &cookie->request ); 64 array_reset(&cookie->request);
64 free( cookie ); io_close( sock ); return; 65 free(cookie);
66 io_close(sock);
67 return;
65 } 68 }
66 69
67 if( written_size < ws->reply_size ) { 70 if (written_size < ws->reply_size) {
68 char * outbuf; 71 char *outbuf;
69 tai6464 t; 72 tai6464 t;
70 73
71 if( !( outbuf = malloc( ws->reply_size - written_size ) ) ) { 74 if (!(outbuf = malloc(ws->reply_size - written_size))) {
72 array_reset( &cookie->request ); 75 array_reset(&cookie->request);
73 free(cookie); io_close( sock ); 76 free(cookie);
77 io_close(sock);
74 return; 78 return;
75 } 79 }
76 80
77 memcpy( outbuf, ws->reply + written_size, ws->reply_size - written_size ); 81 memcpy(outbuf, ws->reply + written_size, ws->reply_size - written_size);
78 if ( !cookie->batch ) { 82 if (!cookie->batch) {
79 cookie->batch = malloc( sizeof(io_batch) ); 83 cookie->batch = malloc(sizeof(io_batch));
80 iob_init_autofree(cookie->batch, 0); 84 iob_init_autofree(cookie->batch, 0);
81 cookie->batches = 1; 85 cookie->batches = 1;
82 } 86 }
83 87
84 iob_addbuf_free( cookie->batch, outbuf, ws->reply_size - written_size ); 88 iob_addbuf_free(cookie->batch, outbuf, ws->reply_size - written_size);
85 89
86 /* writeable short data sockets just have a tcp timeout */ 90 /* writeable short data sockets just have a tcp timeout */
87 if( !ws->keep_alive ) { 91 if (!ws->keep_alive) {
88 taia_uint( &t, 0 ); io_timeout( sock, t ); 92 taia_uint(&t, 0);
89 io_dontwantread( sock ); 93 io_timeout(sock, t);
94 io_dontwantread(sock);
90 } 95 }
91 io_wantwrite( sock ); 96 io_wantwrite(sock);
92 } 97 }
93} 98}
94 99
95#define HTTPERROR_302 return http_issue_error( sock, ws, CODE_HTTPERROR_302 ) 100#define HTTPERROR_302 return http_issue_error(sock, ws, CODE_HTTPERROR_302)
96#define HTTPERROR_400 return http_issue_error( sock, ws, CODE_HTTPERROR_400 ) 101#define HTTPERROR_400 return http_issue_error(sock, ws, CODE_HTTPERROR_400)
97#define HTTPERROR_400_PARAM return http_issue_error( sock, ws, CODE_HTTPERROR_400_PARAM ) 102#define HTTPERROR_400_PARAM return http_issue_error(sock, ws, CODE_HTTPERROR_400_PARAM)
98#define HTTPERROR_400_COMPACT return http_issue_error( sock, ws, CODE_HTTPERROR_400_COMPACT ) 103#define HTTPERROR_400_COMPACT return http_issue_error(sock, ws, CODE_HTTPERROR_400_COMPACT)
99#define HTTPERROR_400_DOUBLEHASH return http_issue_error( sock, ws, CODE_HTTPERROR_400_PARAM ) 104#define HTTPERROR_400_DOUBLEHASH return http_issue_error(sock, ws, CODE_HTTPERROR_400_PARAM)
100#define HTTPERROR_402_NOTMODEST return http_issue_error( sock, ws, CODE_HTTPERROR_402_NOTMODEST ) 105#define HTTPERROR_402_NOTMODEST return http_issue_error(sock, ws, CODE_HTTPERROR_402_NOTMODEST)
101#define HTTPERROR_403_IP return http_issue_error( sock, ws, CODE_HTTPERROR_403_IP ) 106#define HTTPERROR_403_IP return http_issue_error(sock, ws, CODE_HTTPERROR_403_IP)
102#define HTTPERROR_404 return http_issue_error( sock, ws, CODE_HTTPERROR_404 ) 107#define HTTPERROR_404 return http_issue_error(sock, ws, CODE_HTTPERROR_404)
103#define HTTPERROR_500 return http_issue_error( sock, ws, CODE_HTTPERROR_500 ) 108#define HTTPERROR_500 return http_issue_error(sock, ws, CODE_HTTPERROR_500)
104ssize_t http_issue_error( const int64 sock, struct ot_workstruct *ws, int code ) { 109ssize_t http_issue_error(const int64 sock, struct ot_workstruct *ws, int code) {
105 char *error_code[] = { "302 Found", "400 Invalid Request", "400 Invalid Request", "400 Invalid Request", "402 Payment Required", 110 char *error_code[] = {"302 Found", "400 Invalid Request", "400 Invalid Request", "400 Invalid Request", "402 Payment Required",
106 "403 Not Modest", "403 Access Denied", "404 Not Found", "500 Internal Server Error" }; 111 "403 Not Modest", "403 Access Denied", "404 Not Found", "500 Internal Server Error"};
107 char *title = error_code[code]; 112 char *title = error_code[code];
108 113
109 ws->reply = ws->outbuf; 114 ws->reply = ws->outbuf;
110 if( code == CODE_HTTPERROR_302 ) 115 if (code == CODE_HTTPERROR_302)
111 ws->reply_size = snprintf( ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 302 Found\r\nContent-Length: 0\r\nLocation: %s\r\n\r\n", g_redirecturl ); 116 ws->reply_size = snprintf(ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 302 Found\r\nContent-Length: 0\r\nLocation: %s\r\n\r\n", g_redirecturl);
112 else 117 else
113 ws->reply_size = snprintf( ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 %s\r\nContent-Type: text/html\r\nContent-Length: %zd\r\n\r\n<title>%s</title>\n", title, strlen(title)+16-4,title+4); 118 ws->reply_size = snprintf(ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 %s\r\nContent-Type: text/html\r\nContent-Length: %zd\r\n\r\n<title>%s</title>\n", title,
119 strlen(title) + 16 - 4, title + 4);
114 120
115#ifdef _DEBUG_HTTPERROR 121#ifdef _DEBUG_HTTPERROR
116 fprintf( stderr, "DEBUG: invalid request was: %s\n", ws->debugbuf ); 122 fprintf(stderr, "DEBUG: invalid request was: %s\n", ws->debugbuf);
117#endif 123#endif
118 stats_issue_event( EVENT_FAILED, FLAG_TCP, code ); 124 stats_issue_event(EVENT_FAILED, FLAG_TCP, code);
119 http_senddata( sock, ws ); 125 http_senddata(sock, ws);
120 return ws->reply_size = -2; 126 return ws->reply_size = -2;
121} 127}
122 128
123ssize_t http_sendiovecdata( const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial ) { 129ssize_t http_sendiovecdata(const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial) {
124 struct http_data *cookie = io_getcookie( sock ); 130 struct http_data *cookie = io_getcookie(sock);
125 io_batch *current; 131 io_batch *current;
126 char *header; 132 char *header;
127 const char *encoding = ""; 133 const char *encoding = "";
128 int i; 134 int i;
129 size_t header_size, size = iovec_length( &iovec_entries, (const struct iovec **)&iovector ); 135 size_t header_size, size = iovec_length(&iovec_entries, (const struct iovec **)&iovector);
130 tai6464 t; 136 tai6464 t;
131 137
132 /* No cookie? Bad socket. Leave. */ 138 /* No cookie? Bad socket. Leave. */
133 if( !cookie ) { 139 if (!cookie) {
134 iovec_free( &iovec_entries, &iovector ); 140 iovec_free(&iovec_entries, &iovector);
135 HTTPERROR_500; 141 HTTPERROR_500;
136 } 142 }
137 143
138 /* If this socket collected request in a buffer, free it now */ 144 /* If this socket collected request in a buffer, free it now */
139 array_reset( &cookie->request ); 145 array_reset(&cookie->request);
140 146
141 /* If we came here, the wait for the answer is over */ 147 /* If we came here, the wait for the answer is over */
142 if (cookie->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK) { 148 if (cookie->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK) {
143 io_dontwantread( sock ); 149 io_dontwantread(sock);
144 cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK; 150 cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK;
145 } 151 }
146 152
147 if( iovec_entries ) { 153 if (iovec_entries) {
148 154
149 if( cookie->flag & STRUCT_HTTP_FLAG_GZIP ) 155 if (cookie->flag & STRUCT_HTTP_FLAG_GZIP)
150 encoding = "Content-Encoding: gzip\r\n"; 156 encoding = "Content-Encoding: gzip\r\n";
151 else if( cookie->flag & STRUCT_HTTP_FLAG_BZIP2 ) 157 else if (cookie->flag & STRUCT_HTTP_FLAG_BZIP2)
152 encoding = "Content-Encoding: bzip2\r\n"; 158 encoding = "Content-Encoding: bzip2\r\n";
153 159
154 if( !(cookie->flag & STRUCT_HTTP_FLAG_CHUNKED) ) 160 if (!(cookie->flag & STRUCT_HTTP_FLAG_CHUNKED))
155 header_size = asprintf( &header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\n%sContent-Length: %zd\r\n\r\n", encoding, size ); 161 header_size = asprintf(&header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\n%sContent-Length: %zd\r\n\r\n", encoding, size);
156 else { 162 else {
157 if ( !(cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER )) { 163 if (!(cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER)) {
158 header_size = asprintf( &header, "HTTP/1.0 200 OK\r\nContent-Type: application/octet-stream\r\n%sTransfer-Encoding: chunked\r\n\r\n%zx\r\n", encoding, size ); 164 header_size =
165 asprintf(&header, "HTTP/1.0 200 OK\r\nContent-Type: application/octet-stream\r\n%sTransfer-Encoding: chunked\r\n\r\n%zx\r\n", encoding, size);
159 cookie->flag |= STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER; 166 cookie->flag |= STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER;
160 } else 167 } else
161 header_size = asprintf( &header, "%zx\r\n", size ); 168 header_size = asprintf(&header, "%zx\r\n", size);
162 } 169 }
163 if( !header ) { 170 if (!header) {
164 iovec_free( &iovec_entries, &iovector ); 171 iovec_free(&iovec_entries, &iovector);
165 HTTPERROR_500; 172 HTTPERROR_500;
166 } 173 }
167 174
168 if (!cookie->batch ) { 175 if (!cookie->batch) {
169 cookie->batch = malloc( sizeof(io_batch) ); 176 cookie->batch = malloc(sizeof(io_batch));
170 if (!cookie->batch) { 177 if (!cookie->batch) {
171 free(header); 178 free(header);
172 iovec_free( &iovec_entries, &iovector ); 179 iovec_free(&iovec_entries, &iovector);
173 HTTPERROR_500; 180 HTTPERROR_500;
174 } 181 }
175 iob_init_autofree(cookie->batch, 0); 182 iob_init_autofree(cookie->batch, 0);
176 cookie->batches = 1; 183 cookie->batches = 1;
177 } 184 }
178 current = cookie->batch + cookie->batches - 1; 185 current = cookie->batch + cookie->batches - 1;
179 iob_addbuf_free( current, header, header_size ); 186 iob_addbuf_free(current, header, header_size);
180 187
181 /* Split huge iovectors into separate io_batches */ 188 /* Split huge iovectors into separate io_batches */
182 for( i=0; i<iovec_entries; ++i ) { 189 for (i = 0; i < iovec_entries; ++i) {
183 /* If the current batch's limit is reached, try to reallocate a new batch to work on */ 190 /* If the current batch's limit is reached, try to reallocate a new batch to work on */
184 if( current->bytesleft > OT_BATCH_LIMIT ) { 191 if (current->bytesleft > OT_BATCH_LIMIT) {
185 io_batch * new_batch = realloc( cookie->batch, (cookie->batches + 1) * sizeof(io_batch) ); 192 io_batch *new_batch = realloc(cookie->batch, (cookie->batches + 1) * sizeof(io_batch));
186 if( new_batch ) { 193 if (new_batch) {
187 cookie->batch = new_batch; 194 cookie->batch = new_batch;
188 current = cookie->batch + cookie->batches++; 195 current = cookie->batch + cookie->batches++;
189 iob_init_autofree(current ,0); 196 iob_init_autofree(current, 0);
190 } 197 }
191 } 198 }
192 iob_addbuf_free( current, iovector[i].iov_base, iovector[i].iov_len ); 199 iob_addbuf_free(current, iovector[i].iov_base, iovector[i].iov_len);
193 } 200 }
194 free( iovector ); 201 free(iovector);
195 if ( cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER ) 202 if (cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER)
196 iob_addbuf(current, "\r\n", 2); 203 iob_addbuf(current, "\r\n", 2);
197 } 204 }
198 205
@@ -203,69 +210,92 @@ ssize_t http_sendiovecdata( const int64 sock, struct ot_workstruct *ws, int iove
203 } 210 }
204 211
205 /* writeable sockets timeout after 10 minutes */ 212 /* writeable sockets timeout after 10 minutes */
206 taia_now( &t ); taia_addsec( &t, &t, OT_CLIENT_TIMEOUT_SEND ); 213 taia_now(&t);
207 io_timeout( sock, t ); 214 taia_addsec(&t, &t, OT_CLIENT_TIMEOUT_SEND);
208 io_wantwrite( sock ); 215 io_timeout(sock, t);
216 io_wantwrite(sock);
209 return 0; 217 return 0;
210} 218}
211 219
212static ssize_t http_handle_stats( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) { 220static ssize_t http_handle_stats(const int64 sock, struct ot_workstruct *ws, char *read_ptr) {
213static const ot_keywords keywords_main[] = 221 static const ot_keywords keywords_main[] = {{"mode", 1}, {"format", 2}, {"info_hash", 3}, {NULL, -3}};
214 { { "mode", 1 }, {"format", 2 }, {"info_hash", 3}, { NULL, -3 } }; 222 static const ot_keywords keywords_mode[] = {{"peer", TASK_STATS_PEERS},
215static const ot_keywords keywords_mode[] = 223 {"conn", TASK_STATS_CONNS},
216 { { "peer", TASK_STATS_PEERS }, { "conn", TASK_STATS_CONNS }, { "scrp", TASK_STATS_SCRAPE }, { "udp4", TASK_STATS_UDP }, { "tcp4", TASK_STATS_TCP }, 224 {"scrp", TASK_STATS_SCRAPE},
217 { "busy", TASK_STATS_BUSY_NETWORKS }, { "torr", TASK_STATS_TORRENTS }, { "fscr", TASK_STATS_FULLSCRAPE }, 225 {"udp4", TASK_STATS_UDP},
218 { "s24s", TASK_STATS_SLASH24S }, { "tpbs", TASK_STATS_TPB }, { "herr", TASK_STATS_HTTPERRORS }, { "completed", TASK_STATS_COMPLETED }, 226 {"tcp4", TASK_STATS_TCP},
219 { "top100", TASK_STATS_TOP100 }, { "top10", TASK_STATS_TOP10 }, { "renew", TASK_STATS_RENEW }, { "syncs", TASK_STATS_SYNCS }, { "version", TASK_STATS_VERSION }, 227 {"busy", TASK_STATS_BUSY_NETWORKS},
220 { "everything", TASK_STATS_EVERYTHING }, { "statedump", TASK_FULLSCRAPE_TRACKERSTATE }, { "fulllog", TASK_STATS_FULLLOG }, 228 {"torr", TASK_STATS_TORRENTS},
221 { "woodpeckers", TASK_STATS_WOODPECKERS}, 229 {"fscr", TASK_STATS_FULLSCRAPE},
230 {"s24s", TASK_STATS_SLASH24S},
231 {"tpbs", TASK_STATS_TPB},
232 {"herr", TASK_STATS_HTTPERRORS},
233 {"completed", TASK_STATS_COMPLETED},
234 {"top100", TASK_STATS_TOP100},
235 {"top10", TASK_STATS_TOP10},
236 {"renew", TASK_STATS_RENEW},
237 {"syncs", TASK_STATS_SYNCS},
238 {"version", TASK_STATS_VERSION},
239 {"everything", TASK_STATS_EVERYTHING},
240 {"statedump", TASK_FULLSCRAPE_TRACKERSTATE},
241 {"fulllog", TASK_STATS_FULLLOG},
242 {"woodpeckers", TASK_STATS_WOODPECKERS},
222#ifdef WANT_LOG_NUMWANT 243#ifdef WANT_LOG_NUMWANT
223 { "numwants", TASK_STATS_NUMWANTS}, 244 {"numwants", TASK_STATS_NUMWANTS},
224#endif 245#endif
225 { NULL, -3 } }; 246 {NULL, -3}};
226static const ot_keywords keywords_format[] = 247 static const ot_keywords keywords_format[] = {{"bin", TASK_FULLSCRAPE_TPB_BINARY}, {"ben", TASK_FULLSCRAPE},
227 { { "bin", TASK_FULLSCRAPE_TPB_BINARY }, { "ben", TASK_FULLSCRAPE }, { "url", TASK_FULLSCRAPE_TPB_URLENCODED }, 248 {"url", TASK_FULLSCRAPE_TPB_URLENCODED}, {"txt", TASK_FULLSCRAPE_TPB_ASCII},
228 { "txt", TASK_FULLSCRAPE_TPB_ASCII }, { "txtp", TASK_FULLSCRAPE_TPB_ASCII_PLUS }, { NULL, -3 } }; 249 {"txtp", TASK_FULLSCRAPE_TPB_ASCII_PLUS}, {NULL, -3}};
229 250
230 int mode = TASK_STATS_PEERS, scanon = 1, format = 0; 251 int mode = TASK_STATS_PEERS, scanon = 1, format = 0;
231 252
232#ifdef WANT_RESTRICT_STATS 253#ifdef WANT_RESTRICT_STATS
233 struct http_data *cookie = io_getcookie( sock ); 254 struct http_data *cookie = io_getcookie(sock);
234 255
235 if( !cookie || !accesslist_is_blessed( cookie->ip, OT_PERMISSION_MAY_STAT ) ) 256 if (!cookie || !accesslist_is_blessed(cookie->ip, OT_PERMISSION_MAY_STAT))
236 HTTPERROR_403_IP; 257 HTTPERROR_403_IP;
237#endif 258#endif
238 259
239 while( scanon ) { 260 while (scanon) {
240 switch( scan_find_keywords( keywords_main, &read_ptr, SCAN_SEARCHPATH_PARAM ) ) { 261 switch (scan_find_keywords(keywords_main, &read_ptr, SCAN_SEARCHPATH_PARAM)) {
241 case -2: scanon = 0; break; /* TERMINATOR */ 262 case -2:
242 case -1: HTTPERROR_400_PARAM; /* PARSE ERROR */ 263 scanon = 0;
243 case -3: scan_urlencoded_skipvalue( &read_ptr ); break; 264 break; /* TERMINATOR */
244 case 1: /* matched "mode" */ 265 case -1:
245 if( ( mode = scan_find_keywords( keywords_mode, &read_ptr, SCAN_SEARCHPATH_VALUE ) ) <= 0 ) HTTPERROR_400_PARAM; 266 HTTPERROR_400_PARAM; /* PARSE ERROR */
267 case -3:
268 scan_urlencoded_skipvalue(&read_ptr);
269 break;
270 case 1: /* matched "mode" */
271 if ((mode = scan_find_keywords(keywords_mode, &read_ptr, SCAN_SEARCHPATH_VALUE)) <= 0)
272 HTTPERROR_400_PARAM;
246 break; 273 break;
247 case 2: /* matched "format" */ 274 case 2: /* matched "format" */
248 if( ( format = scan_find_keywords( keywords_format, &read_ptr, SCAN_SEARCHPATH_VALUE ) ) <= 0 ) HTTPERROR_400_PARAM; 275 if ((format = scan_find_keywords(keywords_format, &read_ptr, SCAN_SEARCHPATH_VALUE)) <= 0)
276 HTTPERROR_400_PARAM;
249 break; 277 break;
250 case 3: HTTPERROR_400_PARAM; /* If the stats URL was mistakenly added as announce URL, return a 400 */ 278 case 3:
279 HTTPERROR_400_PARAM; /* If the stats URL was mistakenly added as announce URL, return a 400 */
251 } 280 }
252 } 281 }
253 282
254#ifdef WANT_FULLSCRAPE 283#ifdef WANT_FULLSCRAPE
255 if( mode == TASK_FULLSCRAPE_TRACKERSTATE ) { 284 if (mode == TASK_FULLSCRAPE_TRACKERSTATE) {
256 format = mode; mode = TASK_STATS_TPB; 285 format = mode;
286 mode = TASK_STATS_TPB;
257 } 287 }
258 288
259 if( mode == TASK_STATS_TPB ) { 289 if (mode == TASK_STATS_TPB) {
260 struct http_data* cookie = io_getcookie( sock ); 290 struct http_data *cookie = io_getcookie(sock);
261 tai6464 t; 291 tai6464 t;
262#ifdef WANT_COMPRESSION_GZIP 292#ifdef WANT_COMPRESSION_GZIP
263 ws->request[ws->request_size] = 0; 293 ws->request[ws->request_size] = 0;
264#ifndef WANT_COMPRESSION_GZIP_ALWAYS 294#ifndef WANT_COMPRESSION_GZIP_ALWAYS
265 if( strstr( read_ptr - 1, "gzip" ) ) { 295 if (strstr(read_ptr - 1, "gzip")) {
266#endif 296#endif
267 cookie->flag |= STRUCT_HTTP_FLAG_GZIP; 297 cookie->flag |= STRUCT_HTTP_FLAG_GZIP;
268 format |= TASK_FLAG_GZIP; 298 format |= TASK_FLAG_GZIP;
269#ifndef WANT_COMPRESSION_GZIP_ALWAYS 299#ifndef WANT_COMPRESSION_GZIP_ALWAYS
270 } 300 }
271#endif 301#endif
@@ -274,282 +304,321 @@ static const ot_keywords keywords_format[] =
274 cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED; 304 cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED;
275 305
276 /* Clients waiting for us should not easily timeout */ 306 /* Clients waiting for us should not easily timeout */
277 taia_uint( &t, 0 ); io_timeout( sock, t ); 307 taia_uint(&t, 0);
278 fullscrape_deliver( sock, format ); 308 io_timeout(sock, t);
279 io_dontwantread( sock ); 309 fullscrape_deliver(sock, format);
310 io_dontwantread(sock);
280 return ws->reply_size = -2; 311 return ws->reply_size = -2;
281 } 312 }
282#endif 313#endif
283 314
284 /* default format for now */ 315 /* default format for now */
285 if( ( mode & TASK_CLASS_MASK ) == TASK_STATS ) { 316 if ((mode & TASK_CLASS_MASK) == TASK_STATS) {
286 tai6464 t; 317 tai6464 t;
287 /* Complex stats also include expensive memory debugging tools */ 318 /* Complex stats also include expensive memory debugging tools */
288 taia_uint( &t, 0 ); io_timeout( sock, t ); 319 taia_uint(&t, 0);
289 stats_deliver( sock, mode ); 320 io_timeout(sock, t);
321 stats_deliver(sock, mode);
290 return ws->reply_size = -2; 322 return ws->reply_size = -2;
291 } 323 }
292 324
293 /* Simple stats can be answered immediately */ 325 /* Simple stats can be answered immediately */
294 return ws->reply_size = return_stats_for_tracker( ws->reply, mode, 0 ); 326 return ws->reply_size = return_stats_for_tracker(ws->reply, mode, 0);
295} 327}
296 328
297#ifdef WANT_MODEST_FULLSCRAPES 329#ifdef WANT_MODEST_FULLSCRAPES
298static pthread_mutex_t g_modest_fullscrape_mutex = PTHREAD_MUTEX_INITIALIZER; 330static pthread_mutex_t g_modest_fullscrape_mutex = PTHREAD_MUTEX_INITIALIZER;
299static ot_vector g_modest_fullscrape_timeouts; 331static ot_vector g_modest_fullscrape_timeouts;
300typedef struct { ot_ip6 ip; ot_time last_fullscrape; } ot_scrape_log; 332typedef struct {
333 ot_ip6 ip;
334 ot_time last_fullscrape;
335} ot_scrape_log;
301#endif 336#endif
302 337
303#ifdef WANT_FULLSCRAPE 338#ifdef WANT_FULLSCRAPE
304static ssize_t http_handle_fullscrape( const int64 sock, struct ot_workstruct *ws ) { 339static ssize_t http_handle_fullscrape(const int64 sock, struct ot_workstruct *ws) {
305 struct http_data* cookie = io_getcookie( sock ); 340 struct http_data *cookie = io_getcookie(sock);
306 int format = 0; 341 int format = 0;
307 tai6464 t; 342 tai6464 t;
308 343
309#ifdef WANT_MODEST_FULLSCRAPES 344#ifdef WANT_MODEST_FULLSCRAPES
310 { 345 {
311 ot_scrape_log this_peer, *new_peer; 346 ot_scrape_log this_peer, *new_peer;
312 int exactmatch; 347 int exactmatch;
313 memcpy( this_peer.ip, cookie->ip, sizeof(ot_ip6)); 348 memcpy(this_peer.ip, cookie->ip, sizeof(ot_ip6));
314 this_peer.last_fullscrape = g_now_seconds; 349 this_peer.last_fullscrape = g_now_seconds;
315 pthread_mutex_lock(&g_modest_fullscrape_mutex); 350 pthread_mutex_lock(&g_modest_fullscrape_mutex);
316 new_peer = vector_find_or_insert( &g_modest_fullscrape_timeouts, &this_peer, sizeof(ot_scrape_log), sizeof(ot_ip6), &exactmatch ); 351 new_peer = vector_find_or_insert(&g_modest_fullscrape_timeouts, &this_peer, sizeof(ot_scrape_log), sizeof(ot_ip6), &exactmatch);
317 if( !new_peer ) { 352 if (!new_peer) {
318 pthread_mutex_unlock(&g_modest_fullscrape_mutex); 353 pthread_mutex_unlock(&g_modest_fullscrape_mutex);
319 HTTPERROR_500; 354 HTTPERROR_500;
320 } 355 }
321 if( exactmatch && ( this_peer.last_fullscrape - new_peer->last_fullscrape ) < OT_MODEST_PEER_TIMEOUT ) { 356 if (exactmatch && (this_peer.last_fullscrape - new_peer->last_fullscrape) < OT_MODEST_PEER_TIMEOUT) {
322 pthread_mutex_unlock(&g_modest_fullscrape_mutex); 357 pthread_mutex_unlock(&g_modest_fullscrape_mutex);
323 HTTPERROR_402_NOTMODEST; 358 HTTPERROR_402_NOTMODEST;
324 } 359 }
325 memcpy( new_peer, &this_peer, sizeof(ot_scrape_log)); 360 memcpy(new_peer, &this_peer, sizeof(ot_scrape_log));
326 pthread_mutex_unlock(&g_modest_fullscrape_mutex); 361 pthread_mutex_unlock(&g_modest_fullscrape_mutex);
327 } 362 }
328#endif 363#endif
329 364
330#ifdef WANT_COMPRESSION_GZIP 365#ifdef WANT_COMPRESSION_GZIP
331 ws->request[ws->request_size-1] = 0; 366 ws->request[ws->request_size - 1] = 0;
332#ifndef WANT_COMPRESSION_GZIP_ALWAYS 367#ifndef WANT_COMPRESSION_GZIP_ALWAYS
333 if( strstr( ws->request, "gzip" ) ) { 368 if (strstr(ws->request, "gzip")) {
334#endif 369#endif
335 cookie->flag |= STRUCT_HTTP_FLAG_GZIP; 370 cookie->flag |= STRUCT_HTTP_FLAG_GZIP;
336 format = TASK_FLAG_GZIP; 371 format = TASK_FLAG_GZIP;
337 stats_issue_event( EVENT_FULLSCRAPE_REQUEST_GZIP, 0, (uintptr_t)cookie->ip ); 372 stats_issue_event(EVENT_FULLSCRAPE_REQUEST_GZIP, 0, (uintptr_t)cookie->ip);
338#ifndef WANT_COMPRESSION_GZIP_ALWAYS 373#ifndef WANT_COMPRESSION_GZIP_ALWAYS
339 } else 374 } else
340#endif 375#endif
341#endif 376#endif
342 stats_issue_event( EVENT_FULLSCRAPE_REQUEST, 0, (uintptr_t)cookie->ip ); 377 stats_issue_event(EVENT_FULLSCRAPE_REQUEST, 0, (uintptr_t)cookie->ip);
343 378
344#ifdef _DEBUG_HTTPERROR 379#ifdef _DEBUG_HTTPERROR
345 fprintf( stderr, "%s", ws->debugbuf ); 380 fprintf(stderr, "%s", ws->debugbuf);
346#endif 381#endif
347 382
348 /* Pass this task to the worker thread */ 383 /* Pass this task to the worker thread */
349 cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED; 384 cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED;
350 /* Clients waiting for us should not easily timeout */ 385 /* Clients waiting for us should not easily timeout */
351 taia_uint( &t, 0 ); io_timeout( sock, t ); 386 taia_uint(&t, 0);
352 fullscrape_deliver( sock, TASK_FULLSCRAPE | format ); 387 io_timeout(sock, t);
353 io_dontwantread( sock ); 388 fullscrape_deliver(sock, TASK_FULLSCRAPE | format);
389 io_dontwantread(sock);
354 return ws->reply_size = -2; 390 return ws->reply_size = -2;
355} 391}
356#endif 392#endif
357 393
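The WANT_MODEST_FULLSCRAPES block above rate-limits full scrapes per client IP by keeping a last-request timestamp in a sorted vector behind a mutex. A much-simplified, single-threaded sketch of the same idea, using a plain linear-search table instead of vector_find_or_insert(); the function name, table size and 300-second timeout are made up for the sketch:

    #include <stdio.h>
    #include <string.h>
    #include <time.h>

    #define MODEST_TIMEOUT 300              /* seconds between fullscrapes */
    #define MAX_CLIENTS    1024

    typedef unsigned char ip6_t[16];

    /* Simplified stand-in for the vector based limiter above. */
    static struct { ip6_t ip; time_t last; } table[MAX_CLIENTS];
    static size_t table_fill;

    static int fullscrape_allowed(const ip6_t ip, time_t now) {
      size_t i;
      for (i = 0; i < table_fill; ++i)
        if (!memcmp(table[i].ip, ip, sizeof(ip6_t))) {
          if (now - table[i].last < MODEST_TIMEOUT)
            return 0;                       /* too soon: answer 402 Not Modest */
          table[i].last = now;
          return 1;
        }
      if (table_fill < MAX_CLIENTS) {       /* first request from this address */
        memcpy(table[table_fill].ip, ip, sizeof(ip6_t));
        table[table_fill++].last = now;
      }
      return 1;
    }

    int main(void) {
      ip6_t  addr = {0};
      time_t now  = time(NULL);
      /* prints "1 0": second request within the timeout is rejected */
      printf("%d %d\n", fullscrape_allowed(addr, now),
                        fullscrape_allowed(addr, now + 1));
      return 0;
    }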
358static ssize_t http_handle_scrape( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) { 394static ssize_t http_handle_scrape(const int64 sock, struct ot_workstruct *ws, char *read_ptr) {
359 static const ot_keywords keywords_scrape[] = { { "info_hash", 1 }, { NULL, -3 } }; 395 static const ot_keywords keywords_scrape[] = {{"info_hash", 1}, {NULL, -3}};
360 396
361 ot_hash * multiscrape_buf = (ot_hash*)ws->request; 397 ot_hash *multiscrape_buf = (ot_hash *)ws->request;
362 int scanon = 1, numwant = 0; 398 int scanon = 1, numwant = 0;
363 399
364 /* This is to hack around stupid clients that send "scrape ?info_hash" */ 400 /* This is to hack around stupid clients that send "scrape ?info_hash" */
365 if( read_ptr[-1] != '?' ) { 401 if (read_ptr[-1] != '?') {
366 while( ( *read_ptr != '?' ) && ( *read_ptr != '\n' ) ) ++read_ptr; 402 while ((*read_ptr != '?') && (*read_ptr != '\n'))
367 if( *read_ptr == '\n' ) HTTPERROR_400_PARAM; 403 ++read_ptr;
404 if (*read_ptr == '\n')
405 HTTPERROR_400_PARAM;
368 ++read_ptr; 406 ++read_ptr;
369 } 407 }
370 408
371 while( scanon ) { 409 while (scanon) {
372 switch( scan_find_keywords( keywords_scrape, &read_ptr, SCAN_SEARCHPATH_PARAM ) ) { 410 switch (scan_find_keywords(keywords_scrape, &read_ptr, SCAN_SEARCHPATH_PARAM)) {
373 case -2: scanon = 0; break; /* TERMINATOR */ 411 case -2:
374 default: HTTPERROR_400_PARAM; /* PARSE ERROR */ 412 scanon = 0;
375 case -3: scan_urlencoded_skipvalue( &read_ptr ); break; 413 break; /* TERMINATOR */
376 case 1: /* matched "info_hash" */ 414 default:
415 HTTPERROR_400_PARAM; /* PARSE ERROR */
416 case -3:
417 scan_urlencoded_skipvalue(&read_ptr);
418 break;
419 case 1: /* matched "info_hash" */
377 /* ignore this, when we have less than 20 bytes */ 420 /* ignore this, when we have less than 20 bytes */
378 if( scan_urlencoded_query( &read_ptr, (char*)(multiscrape_buf + numwant++), SCAN_SEARCHPATH_VALUE ) != (ssize_t)sizeof(ot_hash) ) 421 if (scan_urlencoded_query(&read_ptr, (char *)(multiscrape_buf + numwant++), SCAN_SEARCHPATH_VALUE) != (ssize_t)sizeof(ot_hash))
379 HTTPERROR_400_PARAM; 422 HTTPERROR_400_PARAM;
380 break; 423 break;
381 } 424 }
382 } 425 }
383 426
384 /* No info_hash found? Inform user */ 427 /* No info_hash found? Inform user */
385 if( !numwant ) HTTPERROR_400_PARAM; 428 if (!numwant)
429 HTTPERROR_400_PARAM;
386 430
387 /* Limit number of hashes to process */ 431 /* Limit number of hashes to process */
388 if( numwant > OT_MAXMULTISCRAPE_COUNT ) 432 if (numwant > OT_MAXMULTISCRAPE_COUNT)
389 numwant = OT_MAXMULTISCRAPE_COUNT; 433 numwant = OT_MAXMULTISCRAPE_COUNT;
390 434
391 /* Enough for http header + whole scrape string */ 435 /* Enough for http header + whole scrape string */
392 ws->reply_size = return_tcp_scrape_for_torrent( (const ot_hash*)multiscrape_buf, numwant, ws->reply ); 436 ws->reply_size = return_tcp_scrape_for_torrent((const ot_hash *)multiscrape_buf, numwant, ws->reply);
393 stats_issue_event( EVENT_SCRAPE, FLAG_TCP, ws->reply_size ); 437 stats_issue_event(EVENT_SCRAPE, FLAG_TCP, ws->reply_size);
394 return ws->reply_size; 438 return ws->reply_size;
395} 439}
396 440
397#ifdef WANT_LOG_NUMWANT 441#ifdef WANT_LOG_NUMWANT
398 unsigned long long numwants[201]; 442unsigned long long numwants[201];
399#endif 443#endif
400 444
401#if defined( WANT_KEEPALIVE ) || defined( WANT_IP_FROM_PROXY ) 445#if defined(WANT_KEEPALIVE) || defined(WANT_IP_FROM_PROXY)
402static char* http_header( char *data, size_t byte_count, char *header ) { 446static char *http_header(char *data, size_t byte_count, char *header) {
403 size_t i; 447 size_t i;
404 long sl = strlen( header ); 448 long sl = strlen(header);
405 for( i = 0; i + sl + 2 < byte_count; ++i ) { 449 for (i = 0; i + sl + 2 < byte_count; ++i) {
406 if( data[i] != '\n' || data[ i + sl + 1] != ':' ) continue; 450 if (data[i] != '\n' || data[i + sl + 1] != ':')
407 if( !case_equalb( data + i + 1, sl, header ) ) continue; 451 continue;
452 if (!case_equalb(data + i + 1, sl, header))
453 continue;
408 data += i + sl + 2; 454 data += i + sl + 2;
409 while( *data == ' ' || *data == '\t' ) ++data; 455 while (*data == ' ' || *data == '\t')
456 ++data;
410 return data; 457 return data;
411 } 458 }
412 return 0; 459 return 0;
413} 460}
414#endif 461#endif
415 462
416static ot_keywords keywords_announce[] = { { "port", 1 }, { "left", 2 }, { "event", 3 }, { "numwant", 4 }, { "compact", 5 }, { "compact6", 5 }, { "info_hash", 6 }, 463static ot_keywords keywords_announce[] = {{"port", 1}, {"left", 2}, {"event", 3}, {"numwant", 4}, {"compact", 5}, {"compact6", 5}, {"info_hash", 6},
417#ifdef WANT_IP_FROM_QUERY_STRING 464#ifdef WANT_IP_FROM_QUERY_STRING
418{ "ip", 7 }, 465 {"ip", 7},
419#endif 466#endif
420#ifdef WANT_FULLLOG_NETWORKS 467#ifdef WANT_FULLLOG_NETWORKS
421{ "lognet", 8 }, 468 {"lognet", 8},
422#endif 469#endif
423{ "peer_id", 9 }, 470 {"peer_id", 9}, {NULL, -3}};
424{ NULL, -3 } }; 471static ot_keywords keywords_announce_event[] = {{"completed", 1}, {"stopped", 2}, {NULL, -3}};
425static ot_keywords keywords_announce_event[] = { { "completed", 1 }, { "stopped", 2 }, { NULL, -3 } }; 472static ssize_t http_handle_announce(const int64 sock, struct ot_workstruct *ws, char *read_ptr) {
426static ssize_t http_handle_announce( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) {
427 int numwant, tmp, scanon; 473 int numwant, tmp, scanon;
428 unsigned short port = 0; 474 unsigned short port = 0;
429 char *write_ptr; 475 char *write_ptr;
430 ssize_t len; 476 ssize_t len;
431 struct http_data *cookie = io_getcookie( sock ); 477 struct http_data *cookie = io_getcookie(sock);
432 478
433 /* This is to hack around stupid clients that send "announce ?info_hash" */ 479 /* This is to hack around stupid clients that send "announce ?info_hash" */
434 if( read_ptr[-1] != '?' ) { 480 if (read_ptr[-1] != '?') {
435 while( ( *read_ptr != '?' ) && ( *read_ptr != '\n' ) ) ++read_ptr; 481 while ((*read_ptr != '?') && (*read_ptr != '\n'))
436 if( *read_ptr == '\n' ) HTTPERROR_400_PARAM; 482 ++read_ptr;
483 if (*read_ptr == '\n')
484 HTTPERROR_400_PARAM;
437 ++read_ptr; 485 ++read_ptr;
438 } 486 }
439 487
440#ifdef WANT_IP_FROM_PROXY 488#ifdef WANT_IP_FROM_PROXY
441 if( accesslist_is_blessed( cookie->ip, OT_PERMISSION_MAY_PROXY ) ) { 489 if (accesslist_is_blessed(cookie->ip, OT_PERMISSION_MAY_PROXY)) {
442 ot_ip6 proxied_ip; 490 ot_ip6 proxied_ip;
443 char *fwd = http_header( ws->request, ws->header_size, "x-forwarded-for" ); 491 char *fwd = http_header(ws->request, ws->header_size, "x-forwarded-for");
444 if( fwd && scan_ip6( fwd, proxied_ip ) ) { 492 if (fwd && scan_ip6(fwd, proxied_ip)) {
445 OT_SETIP( ws->peer, proxied_ip ); 493 OT_SETIP(ws->peer, proxied_ip);
446 } else 494 } else
447 OT_SETIP( ws->peer, cookie->ip ); 495 OT_SETIP(ws->peer, cookie->ip);
448 } else 496 } else
449#endif 497#endif
450 OT_SETIP( ws->peer, cookie->ip ); 498 OT_SETIP(ws->peer, cookie->ip);
451 499
452 ws->peer_id = NULL; 500 ws->peer_id = NULL;
453 ws->hash = NULL; 501 ws->hash = NULL;
454 502
455 OT_SETPORT( ws->peer, &port ); 503 OT_SETPORT(ws->peer, &port);
456 OT_PEERFLAG( ws->peer ) = 0; 504 OT_PEERFLAG(ws->peer) = 0;
457 numwant = 50; 505 numwant = 50;
458 scanon = 1; 506 scanon = 1;
459 507
460 while( scanon ) { 508 while (scanon) {
461 switch( scan_find_keywords(keywords_announce, &read_ptr, SCAN_SEARCHPATH_PARAM ) ) { 509 switch (scan_find_keywords(keywords_announce, &read_ptr, SCAN_SEARCHPATH_PARAM)) {
462 case -2: scanon = 0; break; /* TERMINATOR */ 510 case -2:
463 case -1: HTTPERROR_400_PARAM; /* PARSE ERROR */ 511 scanon = 0;
464 case -3: scan_urlencoded_skipvalue( &read_ptr ); break; 512 break; /* TERMINATOR */
513 case -1:
514 HTTPERROR_400_PARAM; /* PARSE ERROR */
515 case -3:
516 scan_urlencoded_skipvalue(&read_ptr);
517 break;
465 case 1: /* matched "port" */ 518 case 1: /* matched "port" */
466 len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ); 519 len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE);
467 if( ( len <= 0 ) || scan_fixed_int( write_ptr, len, &tmp ) || ( tmp > 0xffff ) ) HTTPERROR_400_PARAM; 520 if ((len <= 0) || scan_fixed_int(write_ptr, len, &tmp) || (tmp > 0xffff))
468 port = htons( tmp ); OT_SETPORT( &ws->peer, &port ); 521 HTTPERROR_400_PARAM;
522 port = htons(tmp);
523 OT_SETPORT(&ws->peer, &port);
469 break; 524 break;
470 case 2: /* matched "left" */ 525 case 2: /* matched "left" */
471 if( ( len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ) ) <= 0 ) HTTPERROR_400_PARAM; 526 if ((len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE)) <= 0)
472 if( scan_fixed_int( write_ptr, len, &tmp ) ) tmp = 0; 527 HTTPERROR_400_PARAM;
473 if( !tmp ) OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_SEEDING; 528 if (scan_fixed_int(write_ptr, len, &tmp))
529 tmp = 0;
530 if (!tmp)
531 OT_PEERFLAG(&ws->peer) |= PEER_FLAG_SEEDING;
474 break; 532 break;
475 case 3: /* matched "event" */ 533 case 3: /* matched "event" */
476 switch( scan_find_keywords( keywords_announce_event, &read_ptr, SCAN_SEARCHPATH_VALUE ) ) { 534 switch (scan_find_keywords(keywords_announce_event, &read_ptr, SCAN_SEARCHPATH_VALUE)) {
477 case -1: HTTPERROR_400_PARAM; 535 case -1:
478 case 1: /* matched "completed" */ 536 HTTPERROR_400_PARAM;
479 OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_COMPLETED; 537 case 1: /* matched "completed" */
480 break; 538 OT_PEERFLAG(&ws->peer) |= PEER_FLAG_COMPLETED;
481 case 2: /* matched "stopped" */ 539 break;
482 OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_STOPPED; 540 case 2: /* matched "stopped" */
483 break; 541 OT_PEERFLAG(&ws->peer) |= PEER_FLAG_STOPPED;
484 default: 542 break;
485 break; 543 default:
544 break;
486 } 545 }
487 break; 546 break;
488 case 4: /* matched "numwant" */ 547 case 4: /* matched "numwant" */
489 len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ); 548 len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE);
490 if( ( len <= 0 ) || scan_fixed_int( write_ptr, len, &numwant ) ) HTTPERROR_400_PARAM; 549 if ((len <= 0) || scan_fixed_int(write_ptr, len, &numwant))
491 if( numwant < 0 ) numwant = 50; 550 HTTPERROR_400_PARAM;
492 if( numwant > 200 ) numwant = 200; 551 if (numwant < 0)
552 numwant = 50;
553 if (numwant > 200)
554 numwant = 200;
493 break; 555 break;
494 case 5: /* matched "compact" */ 556 case 5: /* matched "compact" */
495 len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ); 557 len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE);
496 if( ( len <= 0 ) || scan_fixed_int( write_ptr, len, &tmp ) ) HTTPERROR_400_PARAM; 558 if ((len <= 0) || scan_fixed_int(write_ptr, len, &tmp))
497 if( !tmp ) HTTPERROR_400_COMPACT; 559 HTTPERROR_400_PARAM;
560 if (!tmp)
561 HTTPERROR_400_COMPACT;
498 break; 562 break;
499 case 6: /* matched "info_hash" */ 563 case 6: /* matched "info_hash" */
500 if( ws->hash ) HTTPERROR_400_DOUBLEHASH; 564 if (ws->hash)
565 HTTPERROR_400_DOUBLEHASH;
501 /* ignore this, when we have less than 20 bytes */ 566 /* ignore this, when we have less than 20 bytes */
502 if( scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ) != 20 ) HTTPERROR_400_PARAM; 567 if (scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE) != 20)
503 ws->hash = (ot_hash*)write_ptr; 568 HTTPERROR_400_PARAM;
569 ws->hash = (ot_hash *)write_ptr;
504 break; 570 break;
505#ifdef WANT_IP_FROM_QUERY_STRING 571#ifdef WANT_IP_FROM_QUERY_STRING
506 case 7: /* matched "ip" */ 572 case 7: /* matched "ip" */
507 { 573 {
508 char *tmp_buf1 = ws->reply, *tmp_buf2 = ws->reply+16; 574 char *tmp_buf1 = ws->reply, *tmp_buf2 = ws->reply + 16;
509 len = scan_urlencoded_query( &read_ptr, tmp_buf2, SCAN_SEARCHPATH_VALUE ); 575 len = scan_urlencoded_query(&read_ptr, tmp_buf2, SCAN_SEARCHPATH_VALUE);
510 tmp_buf2[len] = 0; 576 tmp_buf2[len] = 0;
511 if( ( len <= 0 ) || !scan_ip6( tmp_buf2, tmp_buf1 ) ) HTTPERROR_400_PARAM; 577 if ((len <= 0) || !scan_ip6(tmp_buf2, tmp_buf1))
512 OT_SETIP( &ws->peer, tmp_buf1 ); 578 HTTPERROR_400_PARAM;
513 } 579 OT_SETIP(&ws->peer, tmp_buf1);
514 break; 580 } break;
515#endif 581#endif
516#ifdef WANT_FULLLOG_NETWORKS 582#ifdef WANT_FULLLOG_NETWORKS
517 case 8: /* matched "lognet" */ 583 case 8: /* matched "lognet" */
518 { 584 {
519 //if( accesslist_is_blessed( cookie->ip, OT_PERMISSION_MAY_STAT ) ) { 585 // if( accesslist_is_blessed( cookie->ip, OT_PERMISSION_MAY_STAT ) ) {
520 char *tmp_buf = ws->reply; 586 char *tmp_buf = ws->reply;
521 ot_net net; 587 ot_net net;
522 signed short parsed, bits; 588 signed short parsed, bits;
523 589
524 len = scan_urlencoded_query( &read_ptr, tmp_buf, SCAN_SEARCHPATH_VALUE ); 590 len = scan_urlencoded_query(&read_ptr, tmp_buf, SCAN_SEARCHPATH_VALUE);
525 tmp_buf[len] = 0; 591 tmp_buf[len] = 0;
526 if( len <= 0 ) HTTPERROR_400_PARAM; 592 if (len <= 0)
527 if( *tmp_buf == '-' ) { 593 HTTPERROR_400_PARAM;
528 loglist_reset( ); 594 if (*tmp_buf == '-') {
529 return ws->reply_size = sprintf( ws->reply, "Successfully removed.\n" ); 595 loglist_reset();
530 } 596 return ws->reply_size = sprintf(ws->reply, "Successfully removed.\n");
531 parsed = scan_ip6( tmp_buf, net.address );
532 if( !parsed ) HTTPERROR_400_PARAM;
533 if( tmp_buf[parsed++] != '/' )
534 bits = 128;
535 else {
536 parsed = scan_short( tmp_buf + parsed, &bits );
537 if( !parsed ) HTTPERROR_400_PARAM;
538 if( ip6_isv4mapped( net.address ) )
539 bits += 96;
540 }
541 net.bits = bits;
542 loglist_add_network( &net );
543 return ws->reply_size = sprintf( ws->reply, "Successfully added.\n" );
544 //}
545 } 597 }
546 break; 598 parsed = scan_ip6(tmp_buf, net.address);
599 if (!parsed)
600 HTTPERROR_400_PARAM;
601 if (tmp_buf[parsed++] != '/')
602 bits = 128;
603 else {
604 parsed = scan_short(tmp_buf + parsed, &bits);
605 if (!parsed)
606 HTTPERROR_400_PARAM;
607 if (ip6_isv4mapped(net.address))
608 bits += 96;
609 }
610 net.bits = bits;
611 loglist_add_network(&net);
612 return ws->reply_size = sprintf(ws->reply, "Successfully added.\n");
613 //}
614 } break;
547#endif 615#endif
548 case 9: /* matched "peer_id" */ 616 case 9: /* matched "peer_id" */
549 /* ignore this, when we have less than 20 bytes */ 617 /* ignore this, when we have less than 20 bytes */
550 if( scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ) != 20 ) HTTPERROR_400_PARAM; 618 if (scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE) != 20)
551 ws->peer_id = write_ptr; 619 HTTPERROR_400_PARAM;
552 break; 620 ws->peer_id = write_ptr;
621 break;
553 } 622 }
554 } 623 }
555 624
@@ -562,100 +631,107 @@ static ssize_t http_handle_announce( const int64 sock, struct ot_workstruct *ws,
562 */ 631 */
563 632
564 /* Scanned whole query string */ 633 /* Scanned whole query string */
565 if( !ws->hash ) 634 if (!ws->hash)
566 return ws->reply_size = sprintf( ws->reply, "d14:failure reason80:Your client forgot to send your torrent's info_hash. Please upgrade your client.e" ); 635 return ws->reply_size = sprintf(ws->reply, "d14:failure reason80:Your client forgot to send your torrent's info_hash. Please upgrade your client.e");
567 636
568 if( OT_PEERFLAG( &ws->peer ) & PEER_FLAG_STOPPED ) 637 if (OT_PEERFLAG(&ws->peer) & PEER_FLAG_STOPPED)
569 ws->reply_size = remove_peer_from_torrent( FLAG_TCP, ws ); 638 ws->reply_size = remove_peer_from_torrent(FLAG_TCP, ws);
570 else 639 else
571 ws->reply_size = add_peer_to_torrent_and_return_peers( FLAG_TCP, ws, numwant ); 640 ws->reply_size = add_peer_to_torrent_and_return_peers(FLAG_TCP, ws, numwant);
572 641
573 stats_issue_event( EVENT_ANNOUNCE, FLAG_TCP, ws->reply_size); 642 stats_issue_event(EVENT_ANNOUNCE, FLAG_TCP, ws->reply_size);
574 return ws->reply_size; 643 return ws->reply_size;
575} 644}
576 645
577ssize_t http_handle_request( const int64 sock, struct ot_workstruct *ws ) { 646ssize_t http_handle_request(const int64 sock, struct ot_workstruct *ws) {
578 ssize_t reply_off, len; 647 ssize_t reply_off, len;
579 char *read_ptr = ws->request, *write_ptr; 648 char *read_ptr = ws->request, *write_ptr;
580 649
581#ifdef WANT_FULLLOG_NETWORKS 650#ifdef WANT_FULLLOG_NETWORKS
582 struct http_data *cookie = io_getcookie( sock ); 651 struct http_data *cookie = io_getcookie(sock);
583 if( loglist_check_address( cookie->ip ) ) { 652 if (loglist_check_address(cookie->ip)) {
584 ot_log *log = malloc( sizeof( ot_log ) ); 653 ot_log *log = malloc(sizeof(ot_log));
585 if( log ) { 654 if (log) {
586 log->size = ws->request_size; 655 log->size = ws->request_size;
587 log->data = malloc( ws->request_size ); 656 log->data = malloc(ws->request_size);
588 log->next = 0; 657 log->next = 0;
589 log->time = g_now_seconds; 658 log->time = g_now_seconds;
590 memcpy( log->ip, cookie->ip, sizeof(ot_ip6)); 659 memcpy(log->ip, cookie->ip, sizeof(ot_ip6));
591 if( log->data ) { 660 if (log->data) {
592 memcpy( log->data, ws->request, ws->request_size ); 661 memcpy(log->data, ws->request, ws->request_size);
593 if( !g_logchain_first ) 662 if (!g_logchain_first)
594 g_logchain_first = g_logchain_last = log; 663 g_logchain_first = g_logchain_last = log;
595 else { 664 else {
596 g_logchain_last->next = log; 665 g_logchain_last->next = log;
597 g_logchain_last = log; 666 g_logchain_last = log;
598 } 667 }
599 } else 668 } else
600 free( log ); 669 free(log);
601 } 670 }
602 } 671 }
603#endif 672#endif
604 673
605#ifdef _DEBUG_HTTPERROR 674#ifdef _DEBUG_HTTPERROR
606 reply_off = ws->request_size; 675 reply_off = ws->request_size;
607 if( ws->request_size >= G_DEBUGBUF_SIZE ) 676 if (ws->request_size >= G_DEBUGBUF_SIZE)
608 reply_off = G_DEBUGBUF_SIZE - 1; 677 reply_off = G_DEBUGBUF_SIZE - 1;
609 memcpy( ws->debugbuf, ws->request, reply_off ); 678 memcpy(ws->debugbuf, ws->request, reply_off);
610 ws->debugbuf[ reply_off ] = 0; 679 ws->debugbuf[reply_off] = 0;
611#endif 680#endif
612 681
613 /* Tell subroutines where to put reply data */ 682 /* Tell subroutines where to put reply data */
614 ws->reply = ws->outbuf + SUCCESS_HTTP_HEADER_LENGTH; 683 ws->reply = ws->outbuf + SUCCESS_HTTP_HEADER_LENGTH;
615 684
616 /* This one implicitly tests strlen < 5, too -- remember, it is \n terminated */ 685 /* This one implicitly tests strlen < 5, too -- remember, it is \n terminated */
617 if( memcmp( read_ptr, "GET /", 5) ) HTTPERROR_400; 686 if (memcmp(read_ptr, "GET /", 5))
687 HTTPERROR_400;
618 688
619 /* Skip leading '/' */ 689 /* Skip leading '/' */
620 for( read_ptr+=4; *read_ptr == '/'; ++read_ptr); 690 for (read_ptr += 4; *read_ptr == '/'; ++read_ptr)
691 ;
621 692
622 /* Try to parse the request. 693 /* Try to parse the request.
623 In reality we abandoned requiring the url to be correct. This now 694 In reality we abandoned requiring the url to be correct. This now
624 only decodes url encoded characters, we check for announces and 695 only decodes url encoded characters, we check for announces and
625 scrapes by looking for "a*" or "sc" */ 696 scrapes by looking for "a*" or "sc" */
626 len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_PATH ); 697 len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_PATH);
627 698
628 /* If parsing returned an error, leave with not found */ 699 /* If parsing returned an error, leave with not found */
629 if( g_redirecturl && ( len == -2 ) ) HTTPERROR_302; 700 if (g_redirecturl && (len == -2))
630 if( len <= 0 ) HTTPERROR_404; 701 HTTPERROR_302;
702 if (len <= 0)
703 HTTPERROR_404;
631 704
632 /* This is the hardcore match for announce*/ 705 /* This is the hardcore match for announce*/
633 if( ( *write_ptr == 'a' ) || ( *write_ptr == '?' ) ) 706 if ((*write_ptr == 'a') || (*write_ptr == '?'))
634 http_handle_announce( sock, ws, read_ptr ); 707 http_handle_announce(sock, ws, read_ptr);
635#ifdef WANT_FULLSCRAPE 708#ifdef WANT_FULLSCRAPE
636 else if( !memcmp( write_ptr, "scrape HTTP/", 12 ) ) 709 else if (!memcmp(write_ptr, "scrape HTTP/", 12))
637 http_handle_fullscrape( sock, ws ); 710 http_handle_fullscrape(sock, ws);
638#endif 711#endif
639 /* This is the hardcore match for scrape */ 712 /* This is the hardcore match for scrape */
640 else if( !memcmp( write_ptr, "sc", 2 ) ) 713 else if (!memcmp(write_ptr, "sc", 2))
641 http_handle_scrape( sock, ws, read_ptr ); 714 http_handle_scrape(sock, ws, read_ptr);
642 /* All the rest is matched the standard way */ 715 /* All the rest is matched the standard way */
643 else if( len == g_stats_path_len && !memcmp( write_ptr, g_stats_path, len ) ) 716 else if (len == g_stats_path_len && !memcmp(write_ptr, g_stats_path, len))
644 http_handle_stats( sock, ws, read_ptr ); 717 http_handle_stats(sock, ws, read_ptr);
645 else 718 else
646 HTTPERROR_404; 719 HTTPERROR_404;
647 720
648 /* Find out if the client wants to keep this connection alive */ 721 /* Find out if the client wants to keep this connection alive */
649 ws->keep_alive = 0; 722 ws->keep_alive = 0;
650#ifdef WANT_KEEPALIVE 723#ifdef WANT_KEEPALIVE
651 read_ptr=http_header( ws->request, ws->header_size, "connection"); 724 read_ptr = http_header(ws->request, ws->header_size, "connection");
652 if( read_ptr && ( *read_ptr == 'K' || *read_ptr == 'k' ) ) ws->keep_alive = 1; 725 if (read_ptr && (*read_ptr == 'K' || *read_ptr == 'k'))
726 ws->keep_alive = 1;
653#endif 727#endif
654 728
655 /* If routines handled sending themselves, just return */ 729 /* If routines handled sending themselves, just return */
656 if( ws->reply_size == -2 ) return 0; 730 if (ws->reply_size == -2)
731 return 0;
657 /* If routine failed, let http error take over */ 732 /* If routine failed, let http error take over */
658 if( ws->reply_size <= 0 ) HTTPERROR_500; 733 if (ws->reply_size <= 0)
734 HTTPERROR_500;
659 735
660 /* This one is rather ugly, so I take you step by step through it. 736 /* This one is rather ugly, so I take you step by step through it.
661 737
@@ -664,17 +740,17 @@ ssize_t http_handle_request( const int64 sock, struct ot_workstruct *ws ) {
664 plus dynamic space needed to expand our Content-Length value. We reserve SUCCESS_HTTP_SIZE_OFF for its expansion and calculate 740 plus dynamic space needed to expand our Content-Length value. We reserve SUCCESS_HTTP_SIZE_OFF for its expansion and calculate
665 the space NOT needed to expand in reply_off 741 the space NOT needed to expand in reply_off
666 */ 742 */
667 reply_off = SUCCESS_HTTP_SIZE_OFF - snprintf( ws->outbuf, 0, "%zd", ws->reply_size ); 743 reply_off = SUCCESS_HTTP_SIZE_OFF - snprintf(ws->outbuf, 0, "%zd", ws->reply_size);
668 ws->reply = ws->outbuf + reply_off; 744 ws->reply = ws->outbuf + reply_off;
669 745
670 /* 2. Now we sprintf our header so that sprintf writes its terminating '\0' exactly one byte before content starts. Complete 746 /* 2. Now we sprintf our header so that sprintf writes its terminating '\0' exactly one byte before content starts. Complete
671 packet size is increased by size of header plus one byte '\n', we will copy over '\0' in next step */ 747 packet size is increased by size of header plus one byte '\n', we will copy over '\0' in next step */
672 ws->reply_size += 1 + sprintf( ws->reply, "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r", ws->reply_size ); 748 ws->reply_size += 1 + sprintf(ws->reply, "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r", ws->reply_size);
673 749
674 /* 3. Finally we join both blocks neatly */ 750 /* 3. Finally we join both blocks neatly */
675 ws->outbuf[ SUCCESS_HTTP_HEADER_LENGTH - 1 ] = '\n'; 751 ws->outbuf[SUCCESS_HTTP_HEADER_LENGTH - 1] = '\n';
676 752
677 http_senddata( sock, ws ); 753 http_senddata(sock, ws);
678 return ws->reply_size; 754 return ws->reply_size;
679} 755}
680 756
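The step-by-step header trick explained in the comments above can be tried in isolation: the body is written at a fixed offset, SUCCESS_HTTP_SIZE_OFF bytes of slack absorb the variable width of the Content-Length digits, and sprintf's terminating '\0' is overwritten with the final '\n' to join header and body. A standalone sketch assuming the same 80/17 constants but a simplified buffer, not opentracker's actual workstruct handling:

    #include <stdio.h>
    #include <sys/types.h>

    enum { HEADER_LENGTH = 80, SIZE_OFF = 17 }; /* mirror SUCCESS_HTTP_* above */

    int main(void) {
      char    outbuf[1024];
      char   *reply = outbuf + HEADER_LENGTH;   /* body always starts here */
      ssize_t reply_size, reply_off;

      /* 0. A handler writes the bencoded body first. */
      reply_size = sprintf(reply, "d8:completei17e10:incompletei4ee");

      /* 1. Reserve exactly the slack the Content-Length digits will need. */
      reply_off = SIZE_OFF - snprintf(outbuf, 0, "%zd", reply_size);
      reply     = outbuf + reply_off;

      /* 2. Print the header so its terminating '\0' lands one byte before
            the body starts... */
      reply_size += 1 + sprintf(reply,
          "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r",
          reply_size);

      /* 3. ...then overwrite that '\0' with '\n' to join header and body. */
      outbuf[HEADER_LENGTH - 1] = '\n';

      fwrite(reply, 1, (size_t)reply_size, stdout);
      return 0;
    }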
diff --git a/ot_http.h b/ot_http.h
index a63e3d3..fecb4eb 100644
--- a/ot_http.h
+++ b/ot_http.h
@@ -22,9 +22,9 @@ struct http_data {
22 STRUCT_HTTP_FLAG flag; 22 STRUCT_HTTP_FLAG flag;
23}; 23};
24 24
25ssize_t http_handle_request( const int64 s, struct ot_workstruct *ws ); 25ssize_t http_handle_request(const int64 s, struct ot_workstruct *ws);
26ssize_t http_sendiovecdata( const int64 s, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial ); 26ssize_t http_sendiovecdata(const int64 s, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial);
27ssize_t http_issue_error( const int64 s, struct ot_workstruct *ws, int code ); 27ssize_t http_issue_error(const int64 s, struct ot_workstruct *ws, int code);
28 28
29extern char *g_stats_path; 29extern char *g_stats_path;
30extern ssize_t g_stats_path_len; 30extern ssize_t g_stats_path_len;
diff --git a/ot_iovec.c b/ot_iovec.c
index f9567a9..9fb30cc 100644
--- a/ot_iovec.c
+++ b/ot_iovec.c
@@ -4,90 +4,89 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <sys/types.h>
8#include <stdlib.h> 7#include <stdlib.h>
9#include <unistd.h> 8#include <sys/types.h>
10#include <sys/uio.h> 9#include <sys/uio.h>
10#include <unistd.h>
11 11
12/* Libowfat */ 12/* Libowfat */
13 13
14/* Opentracker */ 14/* Opentracker */
15#include "ot_iovec.h" 15#include "ot_iovec.h"
16 16
17void *iovec_increase( int *iovec_entries, struct iovec **iovector, size_t new_alloc ) { 17void *iovec_increase(int *iovec_entries, struct iovec **iovector, size_t new_alloc) {
18 void *new_data; 18 void *new_data;
19 int new_entries = 1 + *iovec_entries; 19 int new_entries = 1 + *iovec_entries;
20 struct iovec *new_vec = realloc( *iovector, new_entries * sizeof( struct iovec ) ); 20 struct iovec *new_vec = realloc(*iovector, new_entries * sizeof(struct iovec));
21 21
22 if( !new_vec ) 22 if (!new_vec)
23 return NULL; 23 return NULL;
24 24
25 /* Only allocate after we have a place to store the pointer */ 25 /* Only allocate after we have a place to store the pointer */
26 new_data = malloc( new_alloc ); 26 new_data = malloc(new_alloc);
27 if( !new_data ) 27 if (!new_data)
28 return NULL; 28 return NULL;
29 29
30 new_vec[new_entries - 1].iov_base = new_data; 30 new_vec[new_entries - 1].iov_base = new_data;
31 new_vec[new_entries - 1].iov_len = new_alloc; 31 new_vec[new_entries - 1].iov_len = new_alloc;
32 32
33 *iovector = new_vec; 33 *iovector = new_vec;
34 ++*iovec_entries; 34 ++*iovec_entries;
35 return new_data; 35 return new_data;
36} 36}
37 37
38void *iovec_append( int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector) { 38void *iovec_append(int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector) {
39 int new_entries = *iovec_entries + 1; 39 int new_entries = *iovec_entries + 1;
40 struct iovec *new_vec = realloc( *iovector, new_entries * sizeof( struct iovec ) ); 40 struct iovec *new_vec = realloc(*iovector, new_entries * sizeof(struct iovec));
41 if( !new_vec ) 41 if (!new_vec)
42 return NULL; 42 return NULL;
43 43
44 /* Take over data from appended iovec */ 44 /* Take over data from appended iovec */
45 new_vec[*iovec_entries].iov_base = append_iovector->iov_base; 45 new_vec[*iovec_entries].iov_base = append_iovector->iov_base;
46 new_vec[*iovec_entries].iov_len = append_iovector->iov_len; 46 new_vec[*iovec_entries].iov_len = append_iovector->iov_len;
47 47
48 append_iovector->iov_base = NULL; 48 append_iovector->iov_base = NULL;
49 append_iovector->iov_len = 0; 49 append_iovector->iov_len = 0;
50 50
51 *iovector = new_vec; 51 *iovector = new_vec;
52 *iovec_entries = new_entries; 52 *iovec_entries = new_entries;
53 53
54 return new_vec; 54 return new_vec;
55} 55}
56 56
57 57void iovec_free(int *iovec_entries, struct iovec **iovector) {
58void iovec_free( int *iovec_entries, struct iovec **iovector ) {
59 int i; 58 int i;
60 for( i=0; i<*iovec_entries; ++i ) 59 for (i = 0; i < *iovec_entries; ++i)
61 free( ((*iovector)[i]).iov_base ); 60 free(((*iovector)[i]).iov_base);
62 *iovector = NULL; 61 *iovector = NULL;
63 *iovec_entries = 0; 62 *iovec_entries = 0;
64} 63}
65 64
66void iovec_fixlast( int *iovec_entries, struct iovec **iovector, void *last_ptr ) { 65void iovec_fixlast(int *iovec_entries, struct iovec **iovector, void *last_ptr) {
67 if( *iovec_entries ) { 66 if (*iovec_entries) {
68 char * base = (char*)((*iovector)[ *iovec_entries - 1 ]).iov_base; 67 char *base = (char *)((*iovector)[*iovec_entries - 1]).iov_base;
69 size_t new_alloc = ((char*)last_ptr) - base; 68 size_t new_alloc = ((char *)last_ptr) - base;
70 69
71 ((*iovector)[*iovec_entries - 1 ]).iov_base = realloc( base, new_alloc ); 70 ((*iovector)[*iovec_entries - 1]).iov_base = realloc(base, new_alloc);
72 ((*iovector)[*iovec_entries - 1 ]).iov_len = new_alloc; 71 ((*iovector)[*iovec_entries - 1]).iov_len = new_alloc;
73 } 72 }
74} 73}
75 74
76void *iovec_fix_increase_or_free( int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc ) { 75void *iovec_fix_increase_or_free(int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc) {
77 void *new_data; 76 void *new_data;
78 77
79 iovec_fixlast( iovec_entries, iovector, last_ptr ); 78 iovec_fixlast(iovec_entries, iovector, last_ptr);
80 79
81 if( !( new_data = iovec_increase( iovec_entries, iovector, new_alloc ) ) ) 80 if (!(new_data = iovec_increase(iovec_entries, iovector, new_alloc)))
82 iovec_free( iovec_entries, iovector ); 81 iovec_free(iovec_entries, iovector);
83 82
84 return new_data; 83 return new_data;
85} 84}
86 85
87size_t iovec_length( const int *iovec_entries, const struct iovec **iovector ) { 86size_t iovec_length(const int *iovec_entries, const struct iovec **iovector) {
88 size_t length = 0; 87 size_t length = 0;
89 int i; 88 int i;
90 for( i=0; i<*iovec_entries; ++i ) 89 for (i = 0; i < *iovec_entries; ++i)
91 length += ((*iovector)[i]).iov_len; 90 length += ((*iovector)[i]).iov_len;
92 return length; 91 return length;
93} 92}
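The helpers above follow a grow-and-trim pattern: iovec_increase() appends a freshly allocated buffer as a new iovec entry, the caller fills it, and iovec_fixlast() shrinks that last entry down to the bytes actually used. A minimal usage sketch against the API declared in ot_iovec.h; error handling and the real producers are omitted:

    #include <stdio.h>
    #include "ot_iovec.h"

    int main(void) {
      int           entries = 0;
      struct iovec *vec     = NULL;
      char         *out;

      /* Grab a 1024 byte output buffer as a new iovec entry. */
      out = iovec_increase(&entries, &vec, 1024);
      if (!out)
        return 1;

      /* Produce some data; 'out' ends up just past the last byte written. */
      out += sprintf(out, "d5:filesdee");

      /* Shrink the last entry to the bytes actually used. */
      iovec_fixlast(&entries, &vec, out);

      printf("%d entries, %zu bytes total\n", entries,
             iovec_length(&entries, (const struct iovec **)&vec));

      iovec_free(&entries, &vec);
      return 0;
    }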
diff --git a/ot_iovec.h b/ot_iovec.h
index bb953c3..4317ab7 100644
--- a/ot_iovec.h
+++ b/ot_iovec.h
@@ -8,13 +8,13 @@
8 8
9#include <sys/uio.h> 9#include <sys/uio.h>
10 10
11void *iovec_increase( int *iovec_entries, struct iovec **iovector, size_t new_alloc ); 11void *iovec_increase(int *iovec_entries, struct iovec **iovector, size_t new_alloc);
12void *iovec_append( int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector ); 12void *iovec_append(int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector);
13void iovec_fixlast( int *iovec_entries, struct iovec **iovector, void *last_ptr ); 13void iovec_fixlast(int *iovec_entries, struct iovec **iovector, void *last_ptr);
14void iovec_free( int *iovec_entries, struct iovec **iovector ); 14void iovec_free(int *iovec_entries, struct iovec **iovector);
15 15
16size_t iovec_length( const int *iovec_entries, const struct iovec **iovector ); 16size_t iovec_length(const int *iovec_entries, const struct iovec **iovector);
17 17
18void *iovec_fix_increase_or_free( int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc ); 18void *iovec_fix_increase_or_free(int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc);
19 19
20#endif 20#endif
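
Taken together, the declarations suggest the usual render cycle: iovec_increase() hands out a fresh chunk, the caller writes into it, and iovec_fixlast() shrinks that last entry to the bytes actually used; iovec_fix_increase_or_free() chains the shrink with the next grow and drops the whole list if the allocation fails. A rough sketch with an invented chunk size and payload:

    #include <stdio.h>
    #include <sys/uio.h>
    #include "ot_iovec.h"

    enum { EXAMPLE_CHUNK = 4096 };   /* size picked for the sketch only */

    static size_t example_render(int *entries, struct iovec **vec) {
      char *out = iovec_increase(entries, vec, EXAMPLE_CHUNK);
      if (!out)
        return 0;

      out += sprintf(out, "d5:filesd");   /* writes far less than the chunk */
      out += sprintf(out, "ee");

      /* Trim the last allocation down to what was actually written. */
      iovec_fixlast(entries, vec, out);
      return iovec_length((const int *)entries, (const struct iovec **)vec);
    }
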
diff --git a/ot_livesync.c b/ot_livesync.c
index 335cce5..246317b 100644
--- a/ot_livesync.c
+++ b/ot_livesync.c
@@ -4,126 +4,126 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <pthread.h>
8#include <stdlib.h>
9#include <string.h>
7#include <sys/types.h> 10#include <sys/types.h>
8#include <sys/uio.h> 11#include <sys/uio.h>
9#include <string.h>
10#include <pthread.h>
11#include <unistd.h> 12#include <unistd.h>
12#include <stdlib.h>
13 13
14/* Libowfat */ 14/* Libowfat */
15#include "socket.h"
16#include "ndelay.h"
17#include "byte.h" 15#include "byte.h"
18#include "ip6.h" 16#include "ip6.h"
17#include "ndelay.h"
18#include "socket.h"
19 19
20/* Opentracker */ 20/* Opentracker */
21#include "trackerlogic.h"
22#include "ot_livesync.h"
23#include "ot_accesslist.h" 21#include "ot_accesslist.h"
24#include "ot_stats.h" 22#include "ot_livesync.h"
25#include "ot_mutex.h" 23#include "ot_mutex.h"
24#include "ot_stats.h"
25#include "trackerlogic.h"
26 26
27#ifdef WANT_SYNC_LIVE 27#ifdef WANT_SYNC_LIVE
28 28
29char groupip_1[4] = { 224,0,23,5 }; 29char groupip_1[4] = {224, 0, 23, 5};
30 30
31#define LIVESYNC_INCOMING_BUFFSIZE (256*256) 31#define LIVESYNC_INCOMING_BUFFSIZE (256 * 256)
32 32
33#define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480 33#define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480
34#define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer)+sizeof(ot_hash)) 34#define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer) + sizeof(ot_hash))
35 35
36#define LIVESYNC_MAXDELAY 15 /* seconds */ 36#define LIVESYNC_MAXDELAY 15 /* seconds */
37 37
38enum { OT_SYNC_PEER4, OT_SYNC_PEER6 }; 38enum { OT_SYNC_PEER4, OT_SYNC_PEER6 };
39 39
40/* Forward declaration */ 40/* Forward declaration */
41static void * livesync_worker( void * args ); 41static void *livesync_worker(void *args);
42 42
43/* For outgoing packets */ 43/* For outgoing packets */
44static int64 g_socket_in = -1; 44static int64 g_socket_in = -1;
45 45
46/* For incoming packets */ 46/* For incoming packets */
47static int64 g_socket_out = -1; 47static int64 g_socket_out = -1;
48 48
49static pthread_mutex_t g_outbuf_mutex = PTHREAD_MUTEX_INITIALIZER; 49static pthread_mutex_t g_outbuf_mutex = PTHREAD_MUTEX_INITIALIZER;
50typedef struct { 50typedef struct {
51 uint8_t data[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; 51 uint8_t data[LIVESYNC_OUTGOING_BUFFSIZE_PEERS];
52 size_t fill; 52 size_t fill;
53 ot_time next_packet_time; 53 ot_time next_packet_time;
54} sync_buffer; 54} sync_buffer;
55 55
56static sync_buffer g_v6_buf; 56static sync_buffer g_v6_buf;
57static sync_buffer g_v4_buf; 57static sync_buffer g_v4_buf;
58 58
59static pthread_t thread_id; 59static pthread_t thread_id;
60void livesync_init( ) { 60void livesync_init() {
61 61
62 if( g_socket_in == -1 ) 62 if (g_socket_in == -1)
63 exerr( "No socket address for live sync specified." ); 63 exerr("No socket address for live sync specified.");
64 64
65 /* Prepare outgoing peers buffer */ 65 /* Prepare outgoing peers buffer */
66 memcpy( g_v6_buf.data, &g_tracker_id, sizeof( g_tracker_id ) ); 66 memcpy(g_v6_buf.data, &g_tracker_id, sizeof(g_tracker_id));
67 memcpy( g_v4_buf.data, &g_tracker_id, sizeof( g_tracker_id ) ); 67 memcpy(g_v4_buf.data, &g_tracker_id, sizeof(g_tracker_id));
68 68
69 uint32_pack_big( (char*)g_v6_buf.data + sizeof( g_tracker_id ), OT_SYNC_PEER6); 69 uint32_pack_big((char *)g_v6_buf.data + sizeof(g_tracker_id), OT_SYNC_PEER6);
70 uint32_pack_big( (char*)g_v4_buf.data + sizeof( g_tracker_id ), OT_SYNC_PEER4); 70 uint32_pack_big((char *)g_v4_buf.data + sizeof(g_tracker_id), OT_SYNC_PEER4);
71 71
72 g_v6_buf.fill = sizeof( g_tracker_id ) + sizeof( uint32_t ); 72 g_v6_buf.fill = sizeof(g_tracker_id) + sizeof(uint32_t);
73 g_v4_buf.fill = sizeof( g_tracker_id ) + sizeof( uint32_t ); 73 g_v4_buf.fill = sizeof(g_tracker_id) + sizeof(uint32_t);
74 74
75 g_v6_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; 75 g_v6_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
76 g_v4_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; 76 g_v4_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
77 77
78 pthread_create( &thread_id, NULL, livesync_worker, NULL ); 78 pthread_create(&thread_id, NULL, livesync_worker, NULL);
79} 79}
80 80
81void livesync_deinit() { 81void livesync_deinit() {
82 if( g_socket_in != -1 ) 82 if (g_socket_in != -1)
83 close( g_socket_in ); 83 close(g_socket_in);
84 if( g_socket_out != -1 ) 84 if (g_socket_out != -1)
85 close( g_socket_out ); 85 close(g_socket_out);
86 86
87 pthread_cancel( thread_id ); 87 pthread_cancel(thread_id);
88} 88}
89 89
90void livesync_bind_mcast( ot_ip6 ip, uint16_t port) { 90void livesync_bind_mcast(ot_ip6 ip, uint16_t port) {
91 char tmpip[4] = {0,0,0,0}; 91 char tmpip[4] = {0, 0, 0, 0};
92 char *v4ip; 92 char *v4ip;
93 93
94 if( !ip6_isv4mapped(ip)) 94 if (!ip6_isv4mapped(ip))
95 exerr("v6 mcast support not yet available."); 95 exerr("v6 mcast support not yet available.");
96 v4ip = ip+12; 96 v4ip = ip + 12;
97 97
98 if( g_socket_in != -1 ) 98 if (g_socket_in != -1)
99 exerr("Error: Livesync listen ip specified twice."); 99 exerr("Error: Livesync listen ip specified twice.");
100 100
101 if( ( g_socket_in = socket_udp4( )) < 0) 101 if ((g_socket_in = socket_udp4()) < 0)
102 exerr("Error: Cant create live sync incoming socket." ); 102 exerr("Error: Cant create live sync incoming socket.");
103 ndelay_off(g_socket_in); 103 ndelay_off(g_socket_in);
104 104
105 if( socket_bind4_reuse( g_socket_in, tmpip, port ) == -1 ) 105 if (socket_bind4_reuse(g_socket_in, tmpip, port) == -1)
106 exerr("Error: Cant bind live sync incoming socket." ); 106 exerr("Error: Cant bind live sync incoming socket.");
107 107
108 if( socket_mcjoin4( g_socket_in, groupip_1, v4ip ) ) 108 if (socket_mcjoin4(g_socket_in, groupip_1, v4ip))
109 exerr("Error: Cant make live sync incoming socket join mcast group."); 109 exerr("Error: Cant make live sync incoming socket join mcast group.");
110 110
111 if( ( g_socket_out = socket_udp4()) < 0) 111 if ((g_socket_out = socket_udp4()) < 0)
112 exerr("Error: Cant create live sync outgoing socket." ); 112 exerr("Error: Cant create live sync outgoing socket.");
113 if( socket_bind4_reuse( g_socket_out, v4ip, port ) == -1 ) 113 if (socket_bind4_reuse(g_socket_out, v4ip, port) == -1)
114 exerr("Error: Cant bind live sync outgoing socket." ); 114 exerr("Error: Cant bind live sync outgoing socket.");
115 115
116 socket_mcttl4(g_socket_out, 1); 116 socket_mcttl4(g_socket_out, 1);
117 socket_mcloop4(g_socket_out, 0); 117 socket_mcloop4(g_socket_out, 0);
118} 118}
119 119
120/* Caller MUST hold g_outbuf_mutex. Returns with g_outbuf_mutex unlocked */ 120/* Caller MUST hold g_outbuf_mutex. Returns with g_outbuf_mutex unlocked */
121static void livesync_issue_peersync( sync_buffer *buf ) { 121static void livesync_issue_peersync(sync_buffer *buf) {
122 char mycopy[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; 122 char mycopy[LIVESYNC_OUTGOING_BUFFSIZE_PEERS];
123 size_t fill = buf->fill; 123 size_t fill = buf->fill;
124 124
125 memcpy( mycopy, buf->data, fill ); 125 memcpy(mycopy, buf->data, fill);
126 buf->fill = sizeof( g_tracker_id ) + sizeof( uint32_t ); 126 buf->fill = sizeof(g_tracker_id) + sizeof(uint32_t);
127 buf->next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; 127 buf->next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
128 128
129 /* From now this thread has a local copy of the buffer and 129 /* From now this thread has a local copy of the buffer and
@@ -133,101 +133,99 @@ static void livesync_issue_peersync( sync_buffer *buf ) {
133 socket_send4(g_socket_out, mycopy, fill, groupip_1, LIVESYNC_PORT); 133 socket_send4(g_socket_out, mycopy, fill, groupip_1, LIVESYNC_PORT);
134} 134}
135 135
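
The contract spelled out above (called with g_outbuf_mutex held, returns with it unlocked) exists so the blocking multicast send never happens under the lock: the pending bytes are copied to the stack, the shared buffer is reset, the mutex is dropped, and only then does the packet go out. The same idiom in isolation, with a generic mutex and a placeholder send:

    #include <pthread.h>
    #include <string.h>

    void send_packet(const void *data, size_t len);   /* hypothetical stand-in for socket_send4() */

    /* Caller holds *lock, mirroring livesync_issue_peersync(); we return with it unlocked. */
    static void flush_shared_buffer(pthread_mutex_t *lock, const char *shared, size_t *fill) {
      char   local[1480];               /* matches LIVESYNC_OUTGOING_BUFFSIZE_PEERS */
      size_t len = *fill;

      memcpy(local, shared, len);
      *fill = 0;                        /* other threads may start refilling right away */
      pthread_mutex_unlock(lock);

      send_packet(local, len);          /* the slow part runs outside the lock */
    }
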
136static void livesync_handle_peersync( struct ot_workstruct *ws, size_t peer_size ) { 136static void livesync_handle_peersync(struct ot_workstruct *ws, size_t peer_size) {
137 size_t off = sizeof( g_tracker_id ) + sizeof( uint32_t ); 137 size_t off = sizeof(g_tracker_id) + sizeof(uint32_t);
138 138
139 /* Now basic sanity checks have been done on the live sync packet 139 /* Now basic sanity checks have been done on the live sync packet
140 We might add more testing and logging. */ 140 We might add more testing and logging. */
141 while( (ssize_t)(off + sizeof( ot_hash ) + peer_size) <= ws->request_size ) { 141 while ((ssize_t)(off + sizeof(ot_hash) + peer_size) <= ws->request_size) {
142 memcpy( &ws->peer, ws->request + off + sizeof(ot_hash), peer_size ); 142 memcpy(&ws->peer, ws->request + off + sizeof(ot_hash), peer_size);
143 ws->hash = (ot_hash*)(ws->request + off); 143 ws->hash = (ot_hash *)(ws->request + off);
144 144
145 if( !g_opentracker_running ) return; 145 if (!g_opentracker_running)
146 return;
146 147
147 if( OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED ) 148 if (OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED)
148 remove_peer_from_torrent( FLAG_MCA, ws ); 149 remove_peer_from_torrent(FLAG_MCA, ws);
149 else 150 else
150 add_peer_to_torrent_and_return_peers( FLAG_MCA, ws, /* amount = */ 0 ); 151 add_peer_to_torrent_and_return_peers(FLAG_MCA, ws, /* amount = */ 0);
151 152
152 off += sizeof( ot_hash ) + peer_size; 153 off += sizeof(ot_hash) + peer_size;
153 } 154 }
154 155
155 stats_issue_event(EVENT_SYNC, 0, 156 stats_issue_event(EVENT_SYNC, 0, (ws->request_size - sizeof(g_tracker_id) - sizeof(uint32_t)) / ((ssize_t)sizeof(ot_hash) + peer_size));
156 (ws->request_size - sizeof( g_tracker_id ) - sizeof( uint32_t ) ) /
157 ((ssize_t)sizeof( ot_hash ) + peer_size));
158} 157}
159 158
160/* Tickle the live sync module from time to time, so no events get 159/* Tickle the live sync module from time to time, so no events get
161 stuck when there's not enough traffic to fill udp packets fast 160 stuck when there's not enough traffic to fill udp packets fast
162 enough */ 161 enough */
163void livesync_ticker( ) { 162void livesync_ticker() {
164 /* livesync_issue_peersync sets g_next_packet_time */ 163 /* livesync_issue_peersync sets g_next_packet_time */
165 pthread_mutex_lock(&g_outbuf_mutex); 164 pthread_mutex_lock(&g_outbuf_mutex);
166 if( g_now_seconds > g_v6_buf.next_packet_time && 165 if (g_now_seconds > g_v6_buf.next_packet_time && g_v6_buf.fill > sizeof(g_tracker_id) + sizeof(uint32_t))
167 g_v6_buf.fill > sizeof( g_tracker_id ) + sizeof( uint32_t ) )
168 livesync_issue_peersync(&g_v6_buf); 166 livesync_issue_peersync(&g_v6_buf);
169 else 167 else
170 pthread_mutex_unlock(&g_outbuf_mutex); 168 pthread_mutex_unlock(&g_outbuf_mutex);
171 169
172 pthread_mutex_lock(&g_outbuf_mutex); 170 pthread_mutex_lock(&g_outbuf_mutex);
173 if( g_now_seconds > g_v4_buf.next_packet_time && 171 if (g_now_seconds > g_v4_buf.next_packet_time && g_v4_buf.fill > sizeof(g_tracker_id) + sizeof(uint32_t))
174 g_v4_buf.fill > sizeof( g_tracker_id ) + sizeof( uint32_t ) )
175 livesync_issue_peersync(&g_v4_buf); 172 livesync_issue_peersync(&g_v4_buf);
176 else 173 else
177 pthread_mutex_unlock(&g_outbuf_mutex); 174 pthread_mutex_unlock(&g_outbuf_mutex);
178} 175}
179 176
180/* Inform live sync about whats going on. */ 177/* Inform live sync about whats going on. */
181void livesync_tell( struct ot_workstruct *ws ) { 178void livesync_tell(struct ot_workstruct *ws) {
182 size_t peer_size; /* initialized in next line */ 179 size_t peer_size; /* initialized in next line */
183 ot_peer *peer_src = peer_from_peer6(&ws->peer, &peer_size); 180 ot_peer *peer_src = peer_from_peer6(&ws->peer, &peer_size);
184 sync_buffer *dest_buf = peer_size == OT_PEER_SIZE6 ? &g_v6_buf : &g_v4_buf; 181 sync_buffer *dest_buf = peer_size == OT_PEER_SIZE6 ? &g_v6_buf : &g_v4_buf;
185 182
186 pthread_mutex_lock(&g_outbuf_mutex); 183 pthread_mutex_lock(&g_outbuf_mutex);
187 184
188 memcpy( dest_buf->data + dest_buf->fill, ws->hash, sizeof(ot_hash) ); 185 memcpy(dest_buf->data + dest_buf->fill, ws->hash, sizeof(ot_hash));
189 dest_buf->fill += sizeof(ot_hash); 186 dest_buf->fill += sizeof(ot_hash);
190 187
191 memcpy( dest_buf->data + dest_buf->fill, peer_src, peer_size ); 188 memcpy(dest_buf->data + dest_buf->fill, peer_src, peer_size);
192 dest_buf->fill += peer_size; 189 dest_buf->fill += peer_size;
193 190
194 if( dest_buf->fill >= LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS ) 191 if (dest_buf->fill >= LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS)
195 livesync_issue_peersync(dest_buf); 192 livesync_issue_peersync(dest_buf);
196 else 193 else
197 pthread_mutex_unlock(&g_outbuf_mutex); 194 pthread_mutex_unlock(&g_outbuf_mutex);
198} 195}
199 196
200static void * livesync_worker( void * args ) { 197static void *livesync_worker(void *args) {
201 struct ot_workstruct ws; 198 struct ot_workstruct ws;
202 ot_ip6 in_ip; uint16_t in_port; 199 ot_ip6 in_ip;
200 uint16_t in_port;
203 201
204 (void)args; 202 (void)args;
205 203
206 /* Initialize our "thread local storage" */ 204 /* Initialize our "thread local storage" */
207 ws.inbuf = ws.request = malloc( LIVESYNC_INCOMING_BUFFSIZE ); 205 ws.inbuf = ws.request = malloc(LIVESYNC_INCOMING_BUFFSIZE);
208 ws.outbuf = ws.reply = 0; 206 ws.outbuf = ws.reply = 0;
209 207
210 memcpy( in_ip, V4mappedprefix, sizeof( V4mappedprefix ) ); 208 memcpy(in_ip, V4mappedprefix, sizeof(V4mappedprefix));
211 209
212 while( 1 ) { 210 while (1) {
213 ws.request_size = socket_recv4(g_socket_in, (char*)ws.inbuf, LIVESYNC_INCOMING_BUFFSIZE, 12+(char*)in_ip, &in_port); 211 ws.request_size = socket_recv4(g_socket_in, (char *)ws.inbuf, LIVESYNC_INCOMING_BUFFSIZE, 12 + (char *)in_ip, &in_port);
214 212
215 /* Expect at least tracker id and packet type */ 213 /* Expect at least tracker id and packet type */
216 if( ws.request_size <= (ssize_t)(sizeof( g_tracker_id ) + sizeof( uint32_t )) ) 214 if (ws.request_size <= (ssize_t)(sizeof(g_tracker_id) + sizeof(uint32_t)))
217 continue; 215 continue;
218 if( !accesslist_is_blessed(in_ip, OT_PERMISSION_MAY_LIVESYNC)) 216 if (!accesslist_is_blessed(in_ip, OT_PERMISSION_MAY_LIVESYNC))
219 continue; 217 continue;
220 if( !memcmp( ws.inbuf, &g_tracker_id, sizeof( g_tracker_id ) ) ) { 218 if (!memcmp(ws.inbuf, &g_tracker_id, sizeof(g_tracker_id))) {
221 /* TODO: log packet coming from ourselves */ 219 /* TODO: log packet coming from ourselves */
222 continue; 220 continue;
223 } 221 }
224 222
225 switch( uint32_read_big( sizeof( g_tracker_id ) + (char *)ws.inbuf ) ) { 223 switch (uint32_read_big(sizeof(g_tracker_id) + (char *)ws.inbuf)) {
226 case OT_SYNC_PEER6: 224 case OT_SYNC_PEER6:
227 livesync_handle_peersync( &ws, OT_PEER_SIZE6 ); 225 livesync_handle_peersync(&ws, OT_PEER_SIZE6);
228 break; 226 break;
229 case OT_SYNC_PEER4: 227 case OT_SYNC_PEER4:
230 livesync_handle_peersync( &ws, OT_PEER_SIZE4 ); 228 livesync_handle_peersync(&ws, OT_PEER_SIZE4);
231 break; 229 break;
232 default: 230 default:
233 break; 231 break;
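
Read together, livesync_issue_peersync() and livesync_handle_peersync() imply the datagram layout: the sender's 32-bit tracker id, a big-endian 32-bit packet type (OT_SYNC_PEER4 or OT_SYNC_PEER6), then back-to-back records of one info hash plus one packed peer. A sketch of walking such a packet; the 20-byte hash length and the callback are assumptions made for the illustration:

    #include <stddef.h>
    #include <stdint.h>

    void handle_record(const uint8_t *hash, const uint8_t *peer, size_t peer_size);  /* hypothetical */

    static void walk_sync_packet(const uint8_t *pkt, size_t len, size_t peer_size) {
      const size_t hash_len = 20;                       /* assumed sizeof(ot_hash) */
      size_t off = sizeof(uint32_t) /* tracker id */ + sizeof(uint32_t) /* packet type */;

      while (off + hash_len + peer_size <= len) {
        handle_record(pkt + off, pkt + off + hash_len, peer_size);
        off += hash_len + peer_size;
      }
    }
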
diff --git a/ot_livesync.h b/ot_livesync.h
index 41bfc2e..cb28774 100644
--- a/ot_livesync.h
+++ b/ot_livesync.h
@@ -51,18 +51,18 @@ void livesync_init();
51void livesync_deinit(); 51void livesync_deinit();
52 52
53/* Join multicast group for listening and create sending socket */ 53/* Join multicast group for listening and create sending socket */
54void livesync_bind_mcast( char *ip, uint16_t port ); 54void livesync_bind_mcast(char *ip, uint16_t port);
55 55
56/* Inform live sync about whats going on. */ 56/* Inform live sync about whats going on. */
57void livesync_tell( struct ot_workstruct *ws ); 57void livesync_tell(struct ot_workstruct *ws);
58 58
59/* Tickle the live sync module from time to time, so no events get 59/* Tickle the live sync module from time to time, so no events get
60 stuck when there's not enough traffic to fill udp packets fast 60 stuck when there's not enough traffic to fill udp packets fast
61 enough */ 61 enough */
62void livesync_ticker( ); 62void livesync_ticker();
63 63
64/* Handle an incoming live sync packet */ 64/* Handle an incoming live sync packet */
65void handle_livesync( const int64 sock ); 65void handle_livesync(const int64 sock);
66 66
67#else 67#else
68 68
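
Since livesync_init() refuses to start when no incoming socket has been set up yet, the intended order under WANT_SYNC_LIVE is to bind the multicast group first and spawn the worker second. A bare-bones sketch; the address must be a v4-mapped one, as livesync_bind_mcast() rejects anything else:

    #include <stdint.h>
    #include "ot_livesync.h"

    static void start_livesync(char *v4mapped_ip, uint16_t port) {
      livesync_bind_mcast(v4mapped_ip, port);  /* join group, create in/out sockets */
      livesync_init();                         /* only now spawn the sync worker thread */
    }
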
diff --git a/ot_mutex.c b/ot_mutex.c
index 174c4ca..1aa2783 100644
--- a/ot_mutex.c
+++ b/ot_mutex.c
@@ -16,43 +16,39 @@
16#include "uint32.h" 16#include "uint32.h"
17 17
18/* Opentracker */ 18/* Opentracker */
19#include "trackerlogic.h"
20#include "ot_iovec.h" 19#include "ot_iovec.h"
21#include "ot_mutex.h" 20#include "ot_mutex.h"
22#include "ot_stats.h" 21#include "ot_stats.h"
22#include "trackerlogic.h"
23 23
24/* #define MTX_DBG( STRING ) fprintf( stderr, STRING ) */ 24/* #define MTX_DBG( STRING ) fprintf( stderr, STRING ) */
25#define MTX_DBG( STRING ) 25#define MTX_DBG(STRING)
26 26
27/* Our global all torrents list */ 27/* Our global all torrents list */
28static ot_vector all_torrents[OT_BUCKET_COUNT]; 28static ot_vector all_torrents[OT_BUCKET_COUNT];
29static pthread_mutex_t bucket_mutex[OT_BUCKET_COUNT]; 29static pthread_mutex_t bucket_mutex[OT_BUCKET_COUNT];
30static size_t g_torrent_count; 30static size_t g_torrent_count;
31 31
32/* Self pipe from opentracker.c */ 32/* Self pipe from opentracker.c */
33extern int g_self_pipe[2]; 33extern int g_self_pipe[2];
34 34
35ot_vector *mutex_bucket_lock( int bucket ) { 35ot_vector *mutex_bucket_lock(int bucket) {
36 pthread_mutex_lock(bucket_mutex + bucket ); 36 pthread_mutex_lock(bucket_mutex + bucket);
37 return all_torrents + bucket; 37 return all_torrents + bucket;
38} 38}
39 39
40ot_vector *mutex_bucket_lock_by_hash( ot_hash const hash ) { 40ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash) { return mutex_bucket_lock(uint32_read_big((const char *)hash) >> OT_BUCKET_COUNT_SHIFT); }
41 return mutex_bucket_lock( uint32_read_big( (const char*)hash ) >> OT_BUCKET_COUNT_SHIFT );
42}
43 41
44void mutex_bucket_unlock( int bucket, int delta_torrentcount ) { 42void mutex_bucket_unlock(int bucket, int delta_torrentcount) {
45 pthread_mutex_unlock(bucket_mutex + bucket); 43 pthread_mutex_unlock(bucket_mutex + bucket);
46 g_torrent_count += delta_torrentcount; 44 g_torrent_count += delta_torrentcount;
47} 45}
48 46
49void mutex_bucket_unlock_by_hash( ot_hash const hash, int delta_torrentcount ) { 47void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount) {
50 mutex_bucket_unlock( uint32_read_big( (char*)hash ) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount ); 48 mutex_bucket_unlock(uint32_read_big((char *)hash) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount);
51} 49}
52 50
53size_t mutex_get_torrent_count( ) { 51size_t mutex_get_torrent_count() { return g_torrent_count; }
54 return g_torrent_count;
55}
56 52
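
The bucket helpers hand out the ot_vector that owns a given info hash and expect the caller to report, on unlock, how many torrents it created or destroyed so g_torrent_count stays accurate. A sketch of that round trip; lookup_or_insert() is a hypothetical stand-in for the real vector code:

    #include "ot_mutex.h"
    #include "trackerlogic.h"

    int lookup_or_insert(ot_vector *bucket, ot_hash const hash);  /* hypothetical, returns 1 if new */

    static void touch_torrent(ot_hash const hash) {
      ot_vector *bucket = mutex_bucket_lock_by_hash(hash);
      int        added  = lookup_or_insert(bucket, hash);
      mutex_bucket_unlock_by_hash(hash, added);   /* delta keeps the global torrent count in sync */
    }
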
57/* TaskQueue Magic */ 53/* TaskQueue Magic */
58 54
@@ -65,16 +61,16 @@ struct ot_task {
65 struct ot_task *next; 61 struct ot_task *next;
66}; 62};
67 63
68static ot_taskid next_free_taskid = 1; 64static ot_taskid next_free_taskid = 1;
69static struct ot_task *tasklist; 65static struct ot_task *tasklist;
70static pthread_mutex_t tasklist_mutex; 66static pthread_mutex_t tasklist_mutex;
71static pthread_cond_t tasklist_being_filled; 67static pthread_cond_t tasklist_being_filled;
72 68
73int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) { 69int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype) {
74 struct ot_task ** tmptask, * task; 70 struct ot_task **tmptask, *task;
75 71
76 task = malloc(sizeof( struct ot_task)); 72 task = malloc(sizeof(struct ot_task));
77 if( !task ) 73 if (!task)
78 return -1; 74 return -1;
79 75
80 task->taskid = 0; 76 task->taskid = 0;
@@ -85,98 +81,98 @@ int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) {
85 task->next = 0; 81 task->next = 0;
86 82
87 /* Want exclusive access to tasklist */ 83 /* Want exclusive access to tasklist */
88 pthread_mutex_lock( &tasklist_mutex ); 84 pthread_mutex_lock(&tasklist_mutex);
89 85
90 /* Skip to end of list */ 86 /* Skip to end of list */
91 tmptask = &tasklist; 87 tmptask = &tasklist;
92 while( *tmptask ) 88 while (*tmptask)
93 tmptask = &(*tmptask)->next; 89 tmptask = &(*tmptask)->next;
94 *tmptask = task; 90 *tmptask = task;
95 91
96 /* Inform waiting workers and release lock */ 92 /* Inform waiting workers and release lock */
97 pthread_cond_broadcast( &tasklist_being_filled ); 93 pthread_cond_broadcast(&tasklist_being_filled);
98 pthread_mutex_unlock( &tasklist_mutex ); 94 pthread_mutex_unlock(&tasklist_mutex);
99 return 0; 95 return 0;
100} 96}
101 97
102void mutex_workqueue_canceltask( int64 sock ) { 98void mutex_workqueue_canceltask(int64 sock) {
103 struct ot_task ** task; 99 struct ot_task **task;
104 100
105 /* Want exclusive access to tasklist */ 101 /* Want exclusive access to tasklist */
106 pthread_mutex_lock( &tasklist_mutex ); 102 pthread_mutex_lock(&tasklist_mutex);
107 103
108 for (task = &tasklist; *task; task = &((*task)->next)) 104 for (task = &tasklist; *task; task = &((*task)->next))
109 if ((*task)->sock == sock) { 105 if ((*task)->sock == sock) {
110 struct iovec *iovec = (*task)->iovec; 106 struct iovec *iovec = (*task)->iovec;
111 struct ot_task *ptask = *task; 107 struct ot_task *ptask = *task;
112 int i; 108 int i;
113 109
114 /* Free task's iovec */ 110 /* Free task's iovec */
115 for( i=0; i<(*task)->iovec_entries; ++i ) 111 for (i = 0; i < (*task)->iovec_entries; ++i)
116 free( iovec[i].iov_base ); 112 free(iovec[i].iov_base);
117 113
118 *task = (*task)->next; 114 *task = (*task)->next;
119 free( ptask ); 115 free(ptask);
120 break; 116 break;
121 } 117 }
122 118
123 /* Release lock */ 119 /* Release lock */
124 pthread_mutex_unlock( &tasklist_mutex ); 120 pthread_mutex_unlock(&tasklist_mutex);
125} 121}
126 122
127ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype ) { 123ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype) {
128 struct ot_task * task; 124 struct ot_task *task;
129 ot_taskid taskid = 0; 125 ot_taskid taskid = 0;
130 126
131 /* Want exclusive access to tasklist */ 127 /* Want exclusive access to tasklist */
132 pthread_mutex_lock( &tasklist_mutex ); 128 pthread_mutex_lock(&tasklist_mutex);
133 129
134 while( !taskid ) { 130 while (!taskid) {
135 /* Skip to the first unassigned task this worker wants to do */ 131 /* Skip to the first unassigned task this worker wants to do */
136 for (task = tasklist; task; task = task->next) 132 for (task = tasklist; task; task = task->next)
137 if (!task->taskid && ( TASK_CLASS_MASK & task->tasktype ) == *tasktype) { 133 if (!task->taskid && (TASK_CLASS_MASK & task->tasktype) == *tasktype) {
138 /* If we found an outstanding task, assign a taskid to it 134 /* If we found an outstanding task, assign a taskid to it
139 and leave the loop */ 135 and leave the loop */
140 task->taskid = taskid = ++next_free_taskid; 136 task->taskid = taskid = ++next_free_taskid;
141 *tasktype = task->tasktype; 137 *tasktype = task->tasktype;
142 break; 138 break;
143 } 139 }
144 140
145 /* Wait until the next task is being fed */ 141 /* Wait until the next task is being fed */
146 if (!taskid) 142 if (!taskid)
147 pthread_cond_wait( &tasklist_being_filled, &tasklist_mutex ); 143 pthread_cond_wait(&tasklist_being_filled, &tasklist_mutex);
148 } 144 }
149 145
150 /* Release lock */ 146 /* Release lock */
151 pthread_mutex_unlock( &tasklist_mutex ); 147 pthread_mutex_unlock(&tasklist_mutex);
152 148
153 return taskid; 149 return taskid;
154} 150}
155 151
156void mutex_workqueue_pushsuccess( ot_taskid taskid ) { 152void mutex_workqueue_pushsuccess(ot_taskid taskid) {
157 struct ot_task ** task; 153 struct ot_task **task;
158 154
159 /* Want exclusive access to tasklist */ 155 /* Want exclusive access to tasklist */
160 pthread_mutex_lock( &tasklist_mutex ); 156 pthread_mutex_lock(&tasklist_mutex);
161 157
162 for (task = &tasklist; *task; task = &((*task)->next)) 158 for (task = &tasklist; *task; task = &((*task)->next))
163 if ((*task)->taskid == taskid) { 159 if ((*task)->taskid == taskid) {
164 struct ot_task *ptask = *task; 160 struct ot_task *ptask = *task;
165 *task = (*task)->next; 161 *task = (*task)->next;
166 free( ptask ); 162 free(ptask);
167 break; 163 break;
168 } 164 }
169 165
170 /* Release lock */ 166 /* Release lock */
171 pthread_mutex_unlock( &tasklist_mutex ); 167 pthread_mutex_unlock(&tasklist_mutex);
172} 168}
173 169
174int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovec ) { 170int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovec) {
175 struct ot_task * task; 171 struct ot_task *task;
176 const char byte = 'o'; 172 const char byte = 'o';
177 173
178 /* Want exclusive access to tasklist */ 174 /* Want exclusive access to tasklist */
179 pthread_mutex_lock( &tasklist_mutex ); 175 pthread_mutex_lock(&tasklist_mutex);
180 176
181 for (task = tasklist; task; task = task->next) 177 for (task = tasklist; task; task = task->next)
182 if (task->taskid == taskid) { 178 if (task->taskid == taskid) {
@@ -187,25 +183,25 @@ int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iove
187 } 183 }
188 184
189 /* Release lock */ 185 /* Release lock */
190 pthread_mutex_unlock( &tasklist_mutex ); 186 pthread_mutex_unlock(&tasklist_mutex);
191 187
192 io_trywrite( g_self_pipe[1], &byte, 1 ); 188 io_trywrite(g_self_pipe[1], &byte, 1);
193 189
194 /* Indicate whether the worker has to throw away results */ 190 /* Indicate whether the worker has to throw away results */
195 return task ? 0 : -1; 191 return task ? 0 : -1;
196} 192}
197 193
198int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec) { 194int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec) {
199 struct ot_task * task; 195 struct ot_task *task;
200 const char byte = 'o'; 196 const char byte = 'o';
201 197
202 /* Want exclusive access to tasklist */ 198 /* Want exclusive access to tasklist */
203 pthread_mutex_lock( &tasklist_mutex ); 199 pthread_mutex_lock(&tasklist_mutex);
204 200
205 for (task = tasklist; task; task = task->next) 201 for (task = tasklist; task; task = task->next)
206 if (task->taskid == taskid) { 202 if (task->taskid == taskid) {
207 if( iovec ) { 203 if (iovec) {
208 if (iovec_append(&task->iovec_entries, &task->iovec, iovec) ) 204 if (iovec_append(&task->iovec_entries, &task->iovec, iovec))
209 task->tasktype = TASK_DONE_PARTIAL; 205 task->tasktype = TASK_DONE_PARTIAL;
210 else 206 else
211 task = NULL; 207 task = NULL;
@@ -215,65 +211,64 @@ int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec) {
215 } 211 }
216 212
217 /* Release lock */ 213 /* Release lock */
218 pthread_mutex_unlock( &tasklist_mutex ); 214 pthread_mutex_unlock(&tasklist_mutex);
219 215
220 io_trywrite( g_self_pipe[1], &byte, 1 ); 216 io_trywrite(g_self_pipe[1], &byte, 1);
221 217
222 /* Indicate whether the worker has to throw away results */ 218 /* Indicate whether the worker has to throw away results */
223 return task ? 0 : -1; 219 return task ? 0 : -1;
224} 220}
225 221
226 222int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovec, int *is_partial) {
227int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec, int *is_partial ) { 223 struct ot_task **task;
228 struct ot_task ** task; 224 int64 sock = -1;
229 int64 sock = -1;
230 225
231 *is_partial = 0; 226 *is_partial = 0;
232 227
233 /* Want exclusive access to tasklist */ 228 /* Want exclusive access to tasklist */
234 pthread_mutex_lock( &tasklist_mutex ); 229 pthread_mutex_lock(&tasklist_mutex);
235 230
236 for (task = &tasklist; *task; task = &((*task)->next)) 231 for (task = &tasklist; *task; task = &((*task)->next))
237 if (((*task)->tasktype & TASK_CLASS_MASK ) == TASK_DONE) { 232 if (((*task)->tasktype & TASK_CLASS_MASK) == TASK_DONE) {
238 struct ot_task *ptask = *task; 233 struct ot_task *ptask = *task;
239 *iovec_entries = ptask->iovec_entries; 234 *iovec_entries = ptask->iovec_entries;
240 *iovec = ptask->iovec; 235 *iovec = ptask->iovec;
241 sock = ptask->sock; 236 sock = ptask->sock;
242 237
243 if ((*task)->tasktype == TASK_DONE) { 238 if ((*task)->tasktype == TASK_DONE) {
244 *task = ptask->next; 239 *task = ptask->next;
245 free( ptask ); 240 free(ptask);
246 } else { 241 } else {
247 ptask->iovec_entries = 0; 242 ptask->iovec_entries = 0;
248 ptask->iovec = NULL; 243 ptask->iovec = NULL;
249 *is_partial = 1; 244 *is_partial = 1;
250 /* Prevent task from showing up immediately again unless new data was added */ 245 /* Prevent task from showing up immediately again unless new data was added */
251 (*task)->tasktype = TASK_FULLSCRAPE; 246 (*task)->tasktype = TASK_FULLSCRAPE;
252 } 247 }
253 break; 248 break;
254 } 249 }
255 250
256 /* Release lock */ 251 /* Release lock */
257 pthread_mutex_unlock( &tasklist_mutex ); 252 pthread_mutex_unlock(&tasklist_mutex);
258 return sock; 253 return sock;
259} 254}
260 255
261void mutex_init( ) { 256void mutex_init() {
262 int i; 257 int i;
263 pthread_mutex_init(&tasklist_mutex, NULL); 258 pthread_mutex_init(&tasklist_mutex, NULL);
264 pthread_cond_init (&tasklist_being_filled, NULL); 259 pthread_cond_init(&tasklist_being_filled, NULL);
265 for (i=0; i < OT_BUCKET_COUNT; ++i) 260 for (i = 0; i < OT_BUCKET_COUNT; ++i)
266 pthread_mutex_init(bucket_mutex + i, NULL); 261 pthread_mutex_init(bucket_mutex + i, NULL);
267 byte_zero( all_torrents, sizeof( all_torrents ) ); 262 byte_zero(all_torrents, sizeof(all_torrents));
268} 263}
269 264
270void mutex_deinit( ) { 265void mutex_deinit() {
271 int i; 266 int i;
272 for (i=0; i < OT_BUCKET_COUNT; ++i) 267 for (i = 0; i < OT_BUCKET_COUNT; ++i)
273 pthread_mutex_destroy(bucket_mutex + i); 268 pthread_mutex_destroy(bucket_mutex + i);
274 pthread_mutex_destroy(&tasklist_mutex); 269 pthread_mutex_destroy(&tasklist_mutex);
275 pthread_cond_destroy(&tasklist_being_filled); 270 pthread_cond_destroy(&tasklist_being_filled);
276 byte_zero( all_torrents, sizeof( all_torrents ) ); 271 byte_zero(all_torrents, sizeof(all_torrents));
277} 272}
278 273
279const char *g_version_mutex_c = "$Source$: $Revision$\n"; 274const char *g_version_mutex_c = "$Source$: $Revision$\n";
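
A worker built on this queue loops on mutex_workqueue_poptask() with the task class it serves, renders its reply into an iovec list and hands the list over with mutex_workqueue_pushresult(); a non-zero return there means the requesting connection is gone and the worker has to throw the buffers away itself. Roughly, with the render step left as a placeholder:

    #include <sys/uio.h>
    #include "ot_iovec.h"
    #include "ot_mutex.h"

    void render_reply(ot_tasktype tasktype, int *entries, struct iovec **vec);  /* hypothetical */

    static void *example_worker(void *args) {
      (void)args;
      while (1) {
        ot_tasktype   tasktype = TASK_FULLSCRAPE;   /* the class this worker subscribes to */
        ot_taskid     taskid   = mutex_workqueue_poptask(&tasktype);
        int           entries  = 0;
        struct iovec *vec      = NULL;

        render_reply(tasktype, &entries, &vec);     /* fills the iovec list */

        if (mutex_workqueue_pushresult(taskid, entries, vec))
          iovec_free(&entries, &vec);               /* nobody is waiting for this any more */
      }
      return NULL;
    }
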
diff --git a/ot_mutex.h b/ot_mutex.h
index 9eb17e5..66b627f 100644
--- a/ot_mutex.h
+++ b/ot_mutex.h
@@ -7,72 +7,73 @@
7#define OT_MUTEX_H__ 7#define OT_MUTEX_H__
8 8
9#include <sys/uio.h> 9#include <sys/uio.h>
10#include "trackerlogic.h"
10 11
11void mutex_init( void ); 12void mutex_init(void);
12void mutex_deinit( void ); 13void mutex_deinit(void);
13 14
14ot_vector *mutex_bucket_lock( int bucket ); 15ot_vector *mutex_bucket_lock(int bucket);
15ot_vector *mutex_bucket_lock_by_hash( ot_hash const hash ); 16ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash);
16 17
17void mutex_bucket_unlock( int bucket, int delta_torrentcount ); 18void mutex_bucket_unlock(int bucket, int delta_torrentcount);
18void mutex_bucket_unlock_by_hash( ot_hash const hash, int delta_torrentcount ); 19void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount);
19 20
20size_t mutex_get_torrent_count(void); 21size_t mutex_get_torrent_count(void);
21 22
22typedef enum { 23typedef enum {
23 TASK_STATS_CONNS = 0x0001, 24 TASK_STATS_CONNS = 0x0001,
24 TASK_STATS_TCP = 0x0002, 25 TASK_STATS_TCP = 0x0002,
25 TASK_STATS_UDP = 0x0003, 26 TASK_STATS_UDP = 0x0003,
26 TASK_STATS_SCRAPE = 0x0004, 27 TASK_STATS_SCRAPE = 0x0004,
27 TASK_STATS_FULLSCRAPE = 0x0005, 28 TASK_STATS_FULLSCRAPE = 0x0005,
28 TASK_STATS_TPB = 0x0006, 29 TASK_STATS_TPB = 0x0006,
29 TASK_STATS_HTTPERRORS = 0x0007, 30 TASK_STATS_HTTPERRORS = 0x0007,
30 TASK_STATS_VERSION = 0x0008, 31 TASK_STATS_VERSION = 0x0008,
31 TASK_STATS_BUSY_NETWORKS = 0x0009, 32 TASK_STATS_BUSY_NETWORKS = 0x0009,
32 TASK_STATS_RENEW = 0x000a, 33 TASK_STATS_RENEW = 0x000a,
33 TASK_STATS_SYNCS = 0x000b, 34 TASK_STATS_SYNCS = 0x000b,
34 TASK_STATS_COMPLETED = 0x000c, 35 TASK_STATS_COMPLETED = 0x000c,
35 TASK_STATS_NUMWANTS = 0x000d, 36 TASK_STATS_NUMWANTS = 0x000d,
36 37
37 TASK_STATS = 0x0100, /* Mask */ 38 TASK_STATS = 0x0100, /* Mask */
38 TASK_STATS_TORRENTS = 0x0101, 39 TASK_STATS_TORRENTS = 0x0101,
39 TASK_STATS_PEERS = 0x0102, 40 TASK_STATS_PEERS = 0x0102,
40 TASK_STATS_SLASH24S = 0x0103, 41 TASK_STATS_SLASH24S = 0x0103,
41 TASK_STATS_TOP10 = 0x0104, 42 TASK_STATS_TOP10 = 0x0104,
42 TASK_STATS_TOP100 = 0x0105, 43 TASK_STATS_TOP100 = 0x0105,
43 TASK_STATS_EVERYTHING = 0x0106, 44 TASK_STATS_EVERYTHING = 0x0106,
44 TASK_STATS_FULLLOG = 0x0107, 45 TASK_STATS_FULLLOG = 0x0107,
45 TASK_STATS_WOODPECKERS = 0x0108, 46 TASK_STATS_WOODPECKERS = 0x0108,
46 47
47 TASK_FULLSCRAPE = 0x0200, /* Default mode */ 48 TASK_FULLSCRAPE = 0x0200, /* Default mode */
48 TASK_FULLSCRAPE_TPB_BINARY = 0x0201, 49 TASK_FULLSCRAPE_TPB_BINARY = 0x0201,
49 TASK_FULLSCRAPE_TPB_ASCII = 0x0202, 50 TASK_FULLSCRAPE_TPB_ASCII = 0x0202,
50 TASK_FULLSCRAPE_TPB_ASCII_PLUS = 0x0203, 51 TASK_FULLSCRAPE_TPB_ASCII_PLUS = 0x0203,
51 TASK_FULLSCRAPE_TPB_URLENCODED = 0x0204, 52 TASK_FULLSCRAPE_TPB_URLENCODED = 0x0204,
52 TASK_FULLSCRAPE_TRACKERSTATE = 0x0205, 53 TASK_FULLSCRAPE_TRACKERSTATE = 0x0205,
53 54
54 TASK_DMEM = 0x0300, 55 TASK_DMEM = 0x0300,
55 56
56 TASK_DONE = 0x0f00, 57 TASK_DONE = 0x0f00,
57 TASK_DONE_PARTIAL = 0x0f01, 58 TASK_DONE_PARTIAL = 0x0f01,
58 59
59 TASK_FLAG_GZIP = 0x1000, 60 TASK_FLAG_GZIP = 0x1000,
60 TASK_FLAG_BZIP2 = 0x2000, 61 TASK_FLAG_BZIP2 = 0x2000,
61 TASK_FLAG_CHUNKED = 0x4000, 62 TASK_FLAG_CHUNKED = 0x4000,
62 63
63 TASK_TASK_MASK = 0x0fff, 64 TASK_TASK_MASK = 0x0fff,
64 TASK_CLASS_MASK = 0x0f00, 65 TASK_CLASS_MASK = 0x0f00,
65 TASK_FLAGS_MASK = 0xf000 66 TASK_FLAGS_MASK = 0xf000
66} ot_tasktype; 67} ot_tasktype;
67 68
68typedef unsigned long ot_taskid; 69typedef unsigned long ot_taskid;
69 70
70int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ); 71int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype);
71void mutex_workqueue_canceltask( int64 sock ); 72void mutex_workqueue_canceltask(int64 sock);
72void mutex_workqueue_pushsuccess( ot_taskid taskid ); 73void mutex_workqueue_pushsuccess(ot_taskid taskid);
73ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype ); 74ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype);
74int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovector ); 75int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovector);
75int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec); 76int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec);
76int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovector, int *is_partial ); 77int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovector, int *is_partial);
77 78
78#endif 79#endif
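
The enum packs three orthogonal things into one value: TASK_CLASS_MASK (0x0f00) selects the class a worker subscribes to, TASK_FLAGS_MASK (0xf000) carries per-request flags like gzip or chunked transfer, and TASK_TASK_MASK (0x0fff) is the concrete task. A short worked example of pulling a combined value apart:

    #include "ot_mutex.h"

    static void classify_example(void) {
      ot_tasktype t = TASK_FULLSCRAPE_TPB_ASCII | TASK_FLAG_GZIP;      /* 0x0202 | 0x1000 = 0x1202 */

      int is_fullscrape = ((t & TASK_CLASS_MASK) == TASK_FULLSCRAPE);  /* 0x0200 -> true */
      int wants_gzip    = ((t & TASK_FLAG_GZIP)  != 0);                /* flags sit outside the class bits */
      int concrete_task = (t & TASK_TASK_MASK);                        /* 0x0202 again */

      (void)is_fullscrape; (void)wants_gzip; (void)concrete_task;
    }
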
diff --git a/ot_stats.c b/ot_stats.c
index 66bed49..fa456c3 100644
--- a/ot_stats.c
+++ b/ot_stats.c
@@ -4,16 +4,16 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <stdlib.h>
8#include <arpa/inet.h> 7#include <arpa/inet.h>
9#include <sys/types.h> 8#include <inttypes.h>
10#include <sys/uio.h> 9#include <pthread.h>
11#include <sys/mman.h>
12#include <stdio.h> 10#include <stdio.h>
11#include <stdlib.h>
13#include <string.h> 12#include <string.h>
14#include <pthread.h> 13#include <sys/mman.h>
14#include <sys/types.h>
15#include <sys/uio.h>
15#include <unistd.h> 16#include <unistd.h>
16#include <inttypes.h>
17#ifdef WANT_SYSLOGS 17#ifdef WANT_SYSLOGS
18#include <syslog.h> 18#include <syslog.h>
19#endif 19#endif
@@ -25,61 +25,63 @@
25#include "ip6.h" 25#include "ip6.h"
26 26
27/* Opentracker */ 27/* Opentracker */
28#include "trackerlogic.h" 28#include "ot_accesslist.h"
29#include "ot_mutex.h"
30#include "ot_iovec.h" 29#include "ot_iovec.h"
30#include "ot_mutex.h"
31#include "ot_stats.h" 31#include "ot_stats.h"
32#include "ot_accesslist.h" 32#include "trackerlogic.h"
33 33
34#ifndef NO_FULLSCRAPE_LOGGING 34#ifndef NO_FULLSCRAPE_LOGGING
35#define LOG_TO_STDERR( ... ) fprintf( stderr, __VA_ARGS__ ) 35#define LOG_TO_STDERR(...) fprintf(stderr, __VA_ARGS__)
36#else 36#else
37#define LOG_TO_STDERR( ... ) 37#define LOG_TO_STDERR(...)
38#endif 38#endif
39 39
40/* Forward declaration */ 40/* Forward declaration */
41static void stats_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ); 41static void stats_make(int *iovec_entries, struct iovec **iovector, ot_tasktype mode);
42#define OT_STATS_TMPSIZE 8192 42#define OT_STATS_TMPSIZE 8192
43 43
44/* Clumsy counters... to be rethought */ 44/* Clumsy counters... to be rethought */
45static unsigned long long ot_overall_tcp_connections = 0; 45static unsigned long long ot_overall_tcp_connections;
46static unsigned long long ot_overall_udp_connections = 0; 46static unsigned long long ot_overall_udp_connections;
47static unsigned long long ot_overall_tcp_successfulannounces = 0; 47static unsigned long long ot_overall_tcp_successfulannounces;
48static unsigned long long ot_overall_udp_successfulannounces = 0; 48static unsigned long long ot_overall_udp_successfulannounces;
49static unsigned long long ot_overall_tcp_successfulscrapes = 0; 49static unsigned long long ot_overall_tcp_successfulscrapes;
50static unsigned long long ot_overall_udp_successfulscrapes = 0; 50static unsigned long long ot_overall_udp_successfulscrapes;
51static unsigned long long ot_overall_udp_connectionidmissmatches = 0; 51static unsigned long long ot_overall_udp_connectionidmissmatches;
52static unsigned long long ot_overall_tcp_connects = 0; 52static unsigned long long ot_overall_tcp_connects;
53static unsigned long long ot_overall_udp_connects = 0; 53static unsigned long long ot_overall_udp_connects;
54static unsigned long long ot_overall_completed = 0; 54static unsigned long long ot_overall_completed;
55static unsigned long long ot_full_scrape_count = 0; 55static unsigned long long ot_full_scrape_count;
56static unsigned long long ot_full_scrape_request_count = 0; 56static unsigned long long ot_full_scrape_request_count;
57static unsigned long long ot_full_scrape_size = 0; 57static unsigned long long ot_full_scrape_size;
58static unsigned long long ot_failed_request_counts[CODE_HTTPERROR_COUNT]; 58static unsigned long long ot_failed_request_counts[CODE_HTTPERROR_COUNT];
59static char * ot_failed_request_names[] = { "302 Redirect", "400 Parse Error", "400 Invalid Parameter", "400 Invalid Parameter (compact=0)", "400 Not Modest", "402 Payment Required", "403 Access Denied", "404 Not found", "500 Internal Server Error" }; 59static char *ot_failed_request_names[] = {
60 "302 Redirect", "400 Parse Error", "400 Invalid Parameter", "400 Invalid Parameter (compact=0)", "400 Not Modest",
61 "402 Payment Required", "403 Access Denied", "404 Not found", "500 Internal Server Error"};
60static unsigned long long ot_renewed[OT_PEER_TIMEOUT]; 62static unsigned long long ot_renewed[OT_PEER_TIMEOUT];
61static unsigned long long ot_overall_sync_count; 63static unsigned long long ot_overall_sync_count;
62static unsigned long long ot_overall_stall_count; 64static unsigned long long ot_overall_stall_count;
63 65
64static time_t ot_start_time; 66static time_t ot_start_time;
65 67
66#define STATS_NETWORK_NODE_BITWIDTH 4 68#define STATS_NETWORK_NODE_BITWIDTH 4
67#define STATS_NETWORK_NODE_COUNT (1<<STATS_NETWORK_NODE_BITWIDTH) 69#define STATS_NETWORK_NODE_COUNT (1 << STATS_NETWORK_NODE_BITWIDTH)
68 70
69#define __BYTE(P,D) (((uint8_t*)P)[D/8]) 71#define __BYTE(P, D) (((uint8_t *)P)[D / 8])
70#define __MSK (STATS_NETWORK_NODE_COUNT-1) 72#define __MSK (STATS_NETWORK_NODE_COUNT - 1)
71#define __SHFT(D) ((D^STATS_NETWORK_NODE_BITWIDTH)&STATS_NETWORK_NODE_BITWIDTH) 73#define __SHFT(D) ((D ^ STATS_NETWORK_NODE_BITWIDTH) & STATS_NETWORK_NODE_BITWIDTH)
72 74
73#define __LDR(P,D) ((__BYTE((P),(D))>>__SHFT((D)))&__MSK) 75#define __LDR(P, D) ((__BYTE((P), (D)) >> __SHFT((D))) & __MSK)
74#define __STR(P,D,V) __BYTE((P),(D))=(__BYTE((P),(D))&~(__MSK<<__SHFT((D))))|((V)<<__SHFT((D))) 76#define __STR(P, D, V) __BYTE((P), (D)) = (__BYTE((P), (D)) & ~(__MSK << __SHFT((D)))) | ((V) << __SHFT((D)))
75 77
76#if 0 78#if 0
77// XXX 79// XXX
78#define STATS_NETWORK_NODE_MAXDEPTH (68-STATS_NETWORK_NODE_BITWIDTH) 80#define STATS_NETWORK_NODE_MAXDEPTH (68 - STATS_NETWORK_NODE_BITWIDTH)
79#define STATS_NETWORK_NODE_LIMIT (48-STATS_NETWORK_NODE_BITWIDTH) 81#define STATS_NETWORK_NODE_LIMIT (48 - STATS_NETWORK_NODE_BITWIDTH)
80#endif 82#endif
81#define STATS_NETWORK_NODE_MAXDEPTH (28-STATS_NETWORK_NODE_BITWIDTH) 83#define STATS_NETWORK_NODE_MAXDEPTH (28 - STATS_NETWORK_NODE_BITWIDTH)
82#define STATS_NETWORK_NODE_LIMIT (24-STATS_NETWORK_NODE_BITWIDTH) 84#define STATS_NETWORK_NODE_LIMIT (24 - STATS_NETWORK_NODE_BITWIDTH)
83 85
84typedef union stats_network_node stats_network_node; 86typedef union stats_network_node stats_network_node;
85union stats_network_node { 87union stats_network_node {
@@ -91,119 +93,123 @@ union stats_network_node {
91static stats_network_node *stats_network_counters_root; 93static stats_network_node *stats_network_counters_root;
92#endif 94#endif
93 95
94static int stat_increase_network_count( stats_network_node **pnode, int depth, uintptr_t ip ) { 96static int stat_increase_network_count(stats_network_node **pnode, int depth, uintptr_t ip) {
95 int foo = __LDR(ip,depth); 97 int foo = __LDR(ip, depth);
96 stats_network_node *node; 98 stats_network_node *node;
97 99
98 if( !*pnode ) { 100 if (!*pnode) {
99 *pnode = malloc( sizeof( stats_network_node ) ); 101 *pnode = malloc(sizeof(stats_network_node));
100 if( !*pnode ) 102 if (!*pnode)
101 return -1; 103 return -1;
102 memset( *pnode, 0, sizeof( stats_network_node ) ); 104 memset(*pnode, 0, sizeof(stats_network_node));
103 } 105 }
104 node = *pnode; 106 node = *pnode;
105 107
106 if( depth < STATS_NETWORK_NODE_MAXDEPTH ) 108 if (depth < STATS_NETWORK_NODE_MAXDEPTH)
107 return stat_increase_network_count( node->children + foo, depth+STATS_NETWORK_NODE_BITWIDTH, ip ); 109 return stat_increase_network_count(node->children + foo, depth + STATS_NETWORK_NODE_BITWIDTH, ip);
108 110
109 node->counters[ foo ]++; 111 node->counters[foo]++;
110 return 0; 112 return 0;
111} 113}
112 114
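
stat_increase_network_count() indexes a radix trie four bits at a time: __LDR picks the nibble that starts at bit position depth, that nibble selects the child node, and only the deepest level (STATS_NETWORK_NODE_MAXDEPTH) holds counters, so the tree aggregates hits per network prefix. A small standalone illustration of the nibble walk, with an arbitrarily chosen example address:

    #include <stdint.h>
    #include <stdio.h>

    /* Same extraction as __LDR above, spelled out for one IPv4 address. */
    static unsigned nibble_at(const uint8_t *addr, int depth) {
      unsigned shift = (depth ^ 4) & 4;          /* 4 for even nibble positions, 0 for odd */
      return (addr[depth / 8] >> shift) & 15;
    }

    int main(void) {
      const uint8_t ip[4] = {10, 20, 30, 40};    /* 0x0A 0x14 0x1E 0x28 */
      int depth;
      /* Prints "0 10 1 4 1 14 2": the child indices for 10.20.30.40,
         the last value being the counter slot at the deepest level. */
      for (depth = 0; depth <= 24; depth += 4)
        printf("%u ", nibble_at(ip, depth));
      putchar('\n');
      return 0;
    }
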
113static int stats_shift_down_network_count( stats_network_node **node, int depth, int shift ) { 115static int stats_shift_down_network_count(stats_network_node **node, int depth, int shift) {
114 int i, rest = 0; 116 int i, rest = 0;
115 117
116 if( !*node ) 118 if (!*node)
117 return 0; 119 return 0;
118 120
119 for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) 121 for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i)
120 if( depth < STATS_NETWORK_NODE_MAXDEPTH ) 122 if (depth < STATS_NETWORK_NODE_MAXDEPTH)
121 rest += stats_shift_down_network_count( (*node)->children + i, depth+STATS_NETWORK_NODE_BITWIDTH, shift ); 123 rest += stats_shift_down_network_count((*node)->children + i, depth + STATS_NETWORK_NODE_BITWIDTH, shift);
122 else 124 else
123 rest += (*node)->counters[i] >>= shift; 125 rest += (*node)->counters[i] >>= shift;
124 126
125 if( !rest ) { 127 if (!rest) {
126 free( *node ); 128 free(*node);
127 *node = NULL; 129 *node = NULL;
128 } 130 }
129 131
130 return rest; 132 return rest;
131} 133}
132 134
133static size_t stats_get_highscore_networks( stats_network_node *node, int depth, ot_ip6 node_value, size_t *scores, ot_ip6 *networks, int network_count, int limit ) { 135static size_t stats_get_highscore_networks(stats_network_node *node, int depth, ot_ip6 node_value, size_t *scores, ot_ip6 *networks, int network_count,
136 int limit) {
134 size_t score = 0; 137 size_t score = 0;
135 int i; 138 int i;
136 139
137 if( !node ) return 0; 140 if (!node)
141 return 0;
138 142
139 if( depth < limit ) { 143 if (depth < limit) {
140 for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) 144 for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i)
141 if( node->children[i] ) { 145 if (node->children[i]) {
142 __STR(node_value,depth,i); 146 __STR(node_value, depth, i);
143 score += stats_get_highscore_networks( node->children[i], depth+STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit ); 147 score += stats_get_highscore_networks(node->children[i], depth + STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit);
144 } 148 }
145 return score; 149 return score;
146 } 150 }
147 151
148 if( depth > limit && depth < STATS_NETWORK_NODE_MAXDEPTH ) { 152 if (depth > limit && depth < STATS_NETWORK_NODE_MAXDEPTH) {
149 for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) 153 for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i)
150 if( node->children[i] ) 154 if (node->children[i])
151 score += stats_get_highscore_networks( node->children[i], depth+STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit ); 155 score += stats_get_highscore_networks(node->children[i], depth + STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit);
152 return score; 156 return score;
153 } 157 }
154 158
155 if( depth > limit && depth == STATS_NETWORK_NODE_MAXDEPTH ) { 159 if (depth > limit && depth == STATS_NETWORK_NODE_MAXDEPTH) {
156 for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) 160 for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i)
157 score += node->counters[i]; 161 score += node->counters[i];
158 return score; 162 return score;
159 } 163 }
160 164
161 /* if( depth == limit ) */ 165 /* if( depth == limit ) */
162 for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) { 166 for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i) {
163 int j=1; 167 int j = 1;
164 size_t node_score; 168 size_t node_score;
165 169
166 if( depth == STATS_NETWORK_NODE_MAXDEPTH ) 170 if (depth == STATS_NETWORK_NODE_MAXDEPTH)
167 node_score = node->counters[i]; 171 node_score = node->counters[i];
168 else 172 else
169 node_score = stats_get_highscore_networks( node->children[i], depth+STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit ); 173 node_score = stats_get_highscore_networks(node->children[i], depth + STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit);
170 174
171 score += node_score; 175 score += node_score;
172 176
173 if( node_score <= scores[0] ) continue; 177 if (node_score <= scores[0])
178 continue;
174 179
175 __STR(node_value,depth,i); 180 __STR(node_value, depth, i);
176 while( j < network_count && node_score > scores[j] ) ++j; 181 while (j < network_count && node_score > scores[j])
182 ++j;
177 --j; 183 --j;
178 184
179 memcpy( scores, scores + 1, j * sizeof( *scores ) ); 185 memcpy(scores, scores + 1, j * sizeof(*scores));
180 memcpy( networks, networks + 1, j * sizeof( *networks ) ); 186 memcpy(networks, networks + 1, j * sizeof(*networks));
181 scores[ j ] = node_score; 187 scores[j] = node_score;
182 memcpy( networks + j, node_value, sizeof( *networks ) ); 188 memcpy(networks + j, node_value, sizeof(*networks));
183 } 189 }
184 190
185 return score; 191 return score;
186} 192}
187 193
188static size_t stats_return_busy_networks( char * reply, stats_network_node *tree, int amount, int limit ) { 194static size_t stats_return_busy_networks(char *reply, stats_network_node *tree, int amount, int limit) {
189 ot_ip6 networks[amount]; 195 ot_ip6 networks[amount];
190 ot_ip6 node_value; 196 ot_ip6 node_value;
191 size_t scores[amount]; 197 size_t scores[amount];
192 int i; 198 int i;
193 char * r = reply; 199 char *r = reply;
194 200
195 memset( scores, 0, sizeof( scores ) ); 201 memset(scores, 0, sizeof(scores));
196 memset( networks, 0, sizeof( networks ) ); 202 memset(networks, 0, sizeof(networks));
197 memset( node_value, 0, sizeof( node_value ) ); 203 memset(node_value, 0, sizeof(node_value));
198 204
199 stats_get_highscore_networks( tree, 0, node_value, scores, networks, amount, limit ); 205 stats_get_highscore_networks(tree, 0, node_value, scores, networks, amount, limit);
200 206
201 r += sprintf( r, "Networks, limit /%d:\n", limit+STATS_NETWORK_NODE_BITWIDTH ); 207 r += sprintf(r, "Networks, limit /%d:\n", limit + STATS_NETWORK_NODE_BITWIDTH);
202 for( i=amount-1; i>=0; --i) { 208 for (i = amount - 1; i >= 0; --i) {
203 if( scores[i] ) { 209 if (scores[i]) {
204 r += sprintf( r, "%08zd: ", scores[i] ); 210 r += sprintf(r, "%08zd: ", scores[i]);
205//#ifdef WANT_V6 211 // #ifdef WANT_V6
206 r += fmt_ip6c( r, networks[i] ); 212 r += fmt_ip6c(r, networks[i]);
207#if 0 213#if 0
208 // XXX 214 // XXX
209 r += fmt_ip4( r, networks[i]); 215 r += fmt_ip4( r, networks[i]);
@@ -216,66 +222,66 @@ static size_t stats_return_busy_networks( char * reply, stats_network_node *tree
216 return r - reply; 222 return r - reply;
217} 223}
218 224
219static size_t stats_slash24s_txt( char *reply, size_t amount ) { 225static size_t stats_slash24s_txt(char *reply, size_t amount) {
220 stats_network_node *slash24s_network_counters_root = NULL; 226 stats_network_node *slash24s_network_counters_root = NULL;
221 char *r=reply; 227 char *r = reply;
222 int bucket; 228 int bucket;
223 size_t i, peer_size = OT_PEER_SIZE4; 229 size_t i, peer_size = OT_PEER_SIZE4;
224 230
225 for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 231 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
226 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 232 ot_vector *torrents_list = mutex_bucket_lock(bucket);
227 for( i=0; i<torrents_list->size; ++i ) { 233 for (i = 0; i < torrents_list->size; ++i) {
228 ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[i] ).peer_list4; 234 ot_peerlist *peer_list = (((ot_torrent *)(torrents_list->data))[i]).peer_list4;
229 ot_vector *bucket_list = &peer_list->peers; 235 ot_vector *bucket_list = &peer_list->peers;
230 int num_buckets = 1; 236 int num_buckets = 1;
231 237
232 if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { 238 if (OT_PEERLIST_HASBUCKETS(peer_list)) {
233 num_buckets = bucket_list->size; 239 num_buckets = bucket_list->size;
234 bucket_list = (ot_vector *)bucket_list->data; 240 bucket_list = (ot_vector *)bucket_list->data;
235 } 241 }
236 242
237 while( num_buckets-- ) { 243 while (num_buckets--) {
238 ot_peer *peers = (ot_peer*)bucket_list->data; 244 ot_peer *peers = (ot_peer *)bucket_list->data;
239 size_t numpeers = bucket_list->size; 245 size_t numpeers = bucket_list->size;
240 while( numpeers-- ) { 246 while (numpeers--) {
241 if( stat_increase_network_count( &slash24s_network_counters_root, 0, (uintptr_t)(peers) ) ) 247 if (stat_increase_network_count(&slash24s_network_counters_root, 0, (uintptr_t)(peers)))
242 goto bailout_unlock; 248 goto bailout_unlock;
243 peers += peer_size; 249 peers += peer_size;
244 } 250 }
245 ++bucket_list; 251 ++bucket_list;
246 } 252 }
247 } 253 }
248 mutex_bucket_unlock( bucket, 0 ); 254 mutex_bucket_unlock(bucket, 0);
249 if( !g_opentracker_running ) 255 if (!g_opentracker_running)
250 goto bailout_error; 256 goto bailout_error;
251 } 257 }
252 258
253 /* The tree is built. Now analyze */ 259 /* The tree is built. Now analyze */
254 r += stats_return_busy_networks( r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_MAXDEPTH ); 260 r += stats_return_busy_networks(r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_MAXDEPTH);
255 r += stats_return_busy_networks( r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_LIMIT ); 261 r += stats_return_busy_networks(r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_LIMIT);
256 goto success; 262 goto success;
257 263
258bailout_unlock: 264bailout_unlock:
259 mutex_bucket_unlock( bucket, 0 ); 265 mutex_bucket_unlock(bucket, 0);
260bailout_error: 266bailout_error:
261 r = reply; 267 r = reply;
262success: 268success:
263 stats_shift_down_network_count( &slash24s_network_counters_root, 0, sizeof(int)*8-1 ); 269 stats_shift_down_network_count(&slash24s_network_counters_root, 0, sizeof(int) * 8 - 1);
264 270
265 return r-reply; 271 return r - reply;
266} 272}
267 273
268#ifdef WANT_SPOT_WOODPECKER 274#ifdef WANT_SPOT_WOODPECKER
269static stats_network_node *stats_woodpeckers_tree; 275static stats_network_node *stats_woodpeckers_tree;
270static pthread_mutex_t g_woodpeckers_mutex = PTHREAD_MUTEX_INITIALIZER; 276static pthread_mutex_t g_woodpeckers_mutex = PTHREAD_MUTEX_INITIALIZER;
271 277
272static size_t stats_return_woodpeckers( char * reply, int amount ) { 278static size_t stats_return_woodpeckers(char *reply, int amount) {
273 char * r = reply; 279 char *r = reply;
274 280
275 pthread_mutex_lock( &g_woodpeckers_mutex ); 281 pthread_mutex_lock(&g_woodpeckers_mutex);
276 r += stats_return_busy_networks( r, stats_woodpeckers_tree, amount, STATS_NETWORK_NODE_MAXDEPTH ); 282 r += stats_return_busy_networks(r, stats_woodpeckers_tree, amount, STATS_NETWORK_NODE_MAXDEPTH);
277 pthread_mutex_unlock( &g_woodpeckers_mutex ); 283 pthread_mutex_unlock(&g_woodpeckers_mutex);
278 return r-reply; 284 return r - reply;
279} 285}
280#endif 286#endif
281 287
@@ -285,8 +291,8 @@ typedef struct {
285 unsigned long long seed_count; 291 unsigned long long seed_count;
286} torrent_stats; 292} torrent_stats;
287 293
288static int torrent_statter( ot_torrent *torrent, uintptr_t data ) { 294static int torrent_statter(ot_torrent *torrent, uintptr_t data) {
289 torrent_stats *stats = (torrent_stats*)data; 295 torrent_stats *stats = (torrent_stats *)data;
290 stats->torrent_count++; 296 stats->torrent_count++;
291 stats->peer_count += torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; 297 stats->peer_count += torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
292 stats->seed_count += torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; 298 stats->seed_count += torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
@@ -294,491 +300,480 @@ static int torrent_statter( ot_torrent *torrent, uintptr_t data ) {
294} 300}
295 301
296/* Converter function from memory to human readable hex strings */ 302/* Converter function from memory to human readable hex strings */
297static char*to_hex(char*d,uint8_t*s){char*m="0123456789ABCDEF";char *t=d;char*e=d+40;while(d<e){*d++=m[*s>>4];*d++=m[*s++&15];}*d=0;return t;} 303static char *to_hex(char *d, uint8_t *s) {
304 char *m = "0123456789ABCDEF";
305 char *t = d;
306 char *e = d + 40;
307 while (d < e) {
308 *d++ = m[*s >> 4];
309 *d++ = m[*s++ & 15];
310 }
311 *d = 0;
312 return t;
313}
298 314
299typedef struct { size_t val; ot_hash hash; } ot_record; 315typedef struct {
316 size_t val;
317 ot_hash hash;
318} ot_record;
300 319
301/* Fetches stats from tracker */ 320/* Fetches stats from tracker */
302size_t stats_top_txt( char * reply, int amount ) { 321size_t stats_top_txt(char *reply, int amount) {
303 size_t j; 322 size_t j;
304 ot_record top100s[100], top100c[100]; 323 ot_record top100s[100], top100c[100];
305 char *r = reply, hex_out[42]; 324 char *r = reply, hex_out[42];
306 int idx, bucket; 325 int idx, bucket;
307 326
308 if( amount > 100 ) 327 if (amount > 100)
309 amount = 100; 328 amount = 100;
310 329
311 byte_zero( top100s, sizeof( top100s ) ); 330 byte_zero(top100s, sizeof(top100s));
312 byte_zero( top100c, sizeof( top100c ) ); 331 byte_zero(top100c, sizeof(top100c));
313 332
314 for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 333 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
315 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 334 ot_vector *torrents_list = mutex_bucket_lock(bucket);
316 for( j=0; j<torrents_list->size; ++j ) { 335 for (j = 0; j < torrents_list->size; ++j) {
317 ot_torrent *torrent = (ot_torrent*)(torrents_list->data) + j; 336 ot_torrent *torrent = (ot_torrent *)(torrents_list->data) + j;
318 size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; 337 size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
319 size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; 338 size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
320 idx = amount - 1; 339 idx = amount - 1;
321 while( (idx >= 0) && ( peer_count > top100c[idx].val ) ) 340 while ((idx >= 0) && (peer_count > top100c[idx].val))
322 --idx; 341 --idx;
323 if ( idx++ != amount - 1 ) { 342 if (idx++ != amount - 1) {
324 memmove( top100c + idx + 1, top100c + idx, ( amount - 1 - idx ) * sizeof( ot_record ) ); 343 memmove(top100c + idx + 1, top100c + idx, (amount - 1 - idx) * sizeof(ot_record));
325 memcpy( &top100c[idx].hash, &torrent->hash, sizeof(ot_hash)); 344 memcpy(&top100c[idx].hash, &torrent->hash, sizeof(ot_hash));
326 top100c[idx].val = peer_count; 345 top100c[idx].val = peer_count;
327 } 346 }
328 idx = amount - 1; 347 idx = amount - 1;
329 while( (idx >= 0) && ( seed_count > top100s[idx].val ) ) 348 while ((idx >= 0) && (seed_count > top100s[idx].val))
330 --idx; 349 --idx;
331 if ( idx++ != amount - 1 ) { 350 if (idx++ != amount - 1) {
332 memmove( top100s + idx + 1, top100s + idx, ( amount - 1 - idx ) * sizeof( ot_record ) ); 351 memmove(top100s + idx + 1, top100s + idx, (amount - 1 - idx) * sizeof(ot_record));
333 memcpy( &top100s[idx].hash, &torrent->hash, sizeof(ot_hash)); 352 memcpy(&top100s[idx].hash, &torrent->hash, sizeof(ot_hash));
334 top100s[idx].val = seed_count; 353 top100s[idx].val = seed_count;
335 } 354 }
336 } 355 }
337 mutex_bucket_unlock( bucket, 0 ); 356 mutex_bucket_unlock(bucket, 0);
338 if( !g_opentracker_running ) 357 if (!g_opentracker_running)
339 return 0; 358 return 0;
340 } 359 }
341 360
342 r += sprintf( r, "Top %d torrents by peers:\n", amount ); 361 r += sprintf(r, "Top %d torrents by peers:\n", amount);
343 for( idx=0; idx<amount; ++idx ) 362 for (idx = 0; idx < amount; ++idx)
344 if( top100c[idx].val ) 363 if (top100c[idx].val)
345 r += sprintf( r, "\t%zd\t%s\n", top100c[idx].val, to_hex( hex_out, top100c[idx].hash) ); 364 r += sprintf(r, "\t%zd\t%s\n", top100c[idx].val, to_hex(hex_out, top100c[idx].hash));
346 r += sprintf( r, "Top %d torrents by seeds:\n", amount ); 365 r += sprintf(r, "Top %d torrents by seeds:\n", amount);
347 for( idx=0; idx<amount; ++idx ) 366 for (idx = 0; idx < amount; ++idx)
348 if( top100s[idx].val ) 367 if (top100s[idx].val)
349 r += sprintf( r, "\t%zd\t%s\n", top100s[idx].val, to_hex( hex_out, top100s[idx].hash) ); 368 r += sprintf(r, "\t%zd\t%s\n", top100s[idx].val, to_hex(hex_out, top100s[idx].hash));
350 369
351 return r - reply; 370 return r - reply;
352} 371}
353 372
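stats_top_txt() keeps its two score boards sorted in descending order: for every torrent it walks up from the tail of the 100-entry array, shifts the smaller entries down by one record with memmove() and lets the last one fall off. The same bounded insertion step as a standalone sketch, reusing the ot_record type defined above:

/* sketch: insert (val, hash) into a descending top-N array, N == amount;
   records is assumed to be zero-initialised like top100s/top100c above */
static void topn_insert(ot_record *records, int amount, size_t val, ot_hash *hash) {
  int idx = amount - 1;
  if (val <= records[idx].val)
    return;                                 /* does not beat the current tail */
  while (idx > 0 && val > records[idx - 1].val)
    --idx;                                  /* find the slot this value earns */
  memmove(records + idx + 1, records + idx, (amount - 1 - idx) * sizeof(ot_record));
  memcpy(&records[idx].hash, hash, sizeof(ot_hash));
  records[idx].val = val;
}
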
354static unsigned long events_per_time( unsigned long long events, time_t t ) { 373static unsigned long events_per_time(unsigned long long events, time_t t) { return events / ((unsigned int)t ? (unsigned int)t : 1); }
355 return events / ( (unsigned int)t ? (unsigned int)t : 1 );
356}
357 374
358static size_t stats_connections_mrtg( char * reply ) { 375static size_t stats_connections_mrtg(char *reply) {
359 ot_time t = time( NULL ) - ot_start_time; 376 ot_time t = time(NULL) - ot_start_time;
360 return sprintf( reply, 377 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.",
361 "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", 378 ot_overall_tcp_connections + ot_overall_udp_connections,
362 ot_overall_tcp_connections+ot_overall_udp_connections, 379 ot_overall_tcp_successfulannounces + ot_overall_udp_successfulannounces + ot_overall_udp_connects, (int)t, (int)(t / 3600),
363 ot_overall_tcp_successfulannounces+ot_overall_udp_successfulannounces+ot_overall_udp_connects, 380 events_per_time(ot_overall_tcp_connections + ot_overall_udp_connections, t),
364 (int)t, 381 events_per_time(ot_overall_tcp_successfulannounces + ot_overall_udp_successfulannounces + ot_overall_udp_connects, t));
365 (int)(t / 3600),
366 events_per_time( ot_overall_tcp_connections+ot_overall_udp_connections, t ),
367 events_per_time( ot_overall_tcp_successfulannounces+ot_overall_udp_successfulannounces+ot_overall_udp_connects, t )
368 );
369} 382}
370 383
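stats_connections_mrtg() and the other stats_*_mrtg() helpers below all emit the same four-line reply that MRTG-style pollers typically read: two counter values, an uptime string and a human readable label, with events_per_time() guarding against a division by zero right after start-up. The shared shape as a sketch, with placeholder counters and label:

/* sketch of the common four-line layout produced by the *_mrtg helpers */
static size_t mrtg_reply_sketch(char *reply, unsigned long long in, unsigned long long out,
                                time_t uptime, const char *label) {
  return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\n%s",
                 in, out, (int)uptime, (int)(uptime / 3600), label);
}
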
371static size_t stats_udpconnections_mrtg( char * reply ) { 384static size_t stats_udpconnections_mrtg(char *reply) {
372 ot_time t = time( NULL ) - ot_start_time; 385 ot_time t = time(NULL) - ot_start_time;
373 return sprintf( reply, 386 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker udp4 stats, %lu conns/s :: %lu success/s.", ot_overall_udp_connections,
374 "%llu\n%llu\n%i seconds (%i hours)\nopentracker udp4 stats, %lu conns/s :: %lu success/s.", 387 ot_overall_udp_successfulannounces + ot_overall_udp_connects, (int)t, (int)(t / 3600), events_per_time(ot_overall_udp_connections, t),
375 ot_overall_udp_connections, 388 events_per_time(ot_overall_udp_successfulannounces + ot_overall_udp_connects, t));
376 ot_overall_udp_successfulannounces+ot_overall_udp_connects,
377 (int)t,
378 (int)(t / 3600),
379 events_per_time( ot_overall_udp_connections, t ),
380 events_per_time( ot_overall_udp_successfulannounces+ot_overall_udp_connects, t )
381 );
382} 389}
383 390
384static size_t stats_tcpconnections_mrtg( char * reply ) { 391static size_t stats_tcpconnections_mrtg(char *reply) {
385 time_t t = time( NULL ) - ot_start_time; 392 time_t t = time(NULL) - ot_start_time;
386 return sprintf( reply, 393 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker tcp4 stats, %lu conns/s :: %lu success/s.", ot_overall_tcp_connections,
387 "%llu\n%llu\n%i seconds (%i hours)\nopentracker tcp4 stats, %lu conns/s :: %lu success/s.", 394 ot_overall_tcp_successfulannounces, (int)t, (int)(t / 3600), events_per_time(ot_overall_tcp_connections, t),
388 ot_overall_tcp_connections, 395 events_per_time(ot_overall_tcp_successfulannounces, t));
389 ot_overall_tcp_successfulannounces,
390 (int)t,
391 (int)(t / 3600),
392 events_per_time( ot_overall_tcp_connections, t ),
393 events_per_time( ot_overall_tcp_successfulannounces, t )
394 );
395} 396}
396 397
397static size_t stats_scrape_mrtg( char * reply ) { 398static size_t stats_scrape_mrtg(char *reply) {
398 time_t t = time( NULL ) - ot_start_time; 399 time_t t = time(NULL) - ot_start_time;
399 return sprintf( reply, 400 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker scrape stats, %lu scrape/s (tcp and udp)", ot_overall_tcp_successfulscrapes,
400 "%llu\n%llu\n%i seconds (%i hours)\nopentracker scrape stats, %lu scrape/s (tcp and udp)", 401 ot_overall_udp_successfulscrapes, (int)t, (int)(t / 3600),
401 ot_overall_tcp_successfulscrapes, 402 events_per_time((ot_overall_tcp_successfulscrapes + ot_overall_udp_successfulscrapes), t));
402 ot_overall_udp_successfulscrapes,
403 (int)t,
404 (int)(t / 3600),
405 events_per_time( (ot_overall_tcp_successfulscrapes+ot_overall_udp_successfulscrapes), t )
406 );
407} 403}
408 404
409static size_t stats_fullscrapes_mrtg( char * reply ) { 405static size_t stats_fullscrapes_mrtg(char *reply) {
410 ot_time t = time( NULL ) - ot_start_time; 406 ot_time t = time(NULL) - ot_start_time;
411 return sprintf( reply, 407 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker full scrape stats, %lu conns/s :: %lu bytes/s.", ot_full_scrape_count * 1000,
412 "%llu\n%llu\n%i seconds (%i hours)\nopentracker full scrape stats, %lu conns/s :: %lu bytes/s.", 408 ot_full_scrape_size, (int)t, (int)(t / 3600), events_per_time(ot_full_scrape_count, t), events_per_time(ot_full_scrape_size, t));
413 ot_full_scrape_count * 1000,
414 ot_full_scrape_size,
415 (int)t,
416 (int)(t / 3600),
417 events_per_time( ot_full_scrape_count, t ),
418 events_per_time( ot_full_scrape_size, t )
419 );
420} 409}
421 410
422static size_t stats_peers_mrtg( char * reply ) { 411static size_t stats_peers_mrtg(char *reply) {
423 torrent_stats stats = {0,0,0}; 412 torrent_stats stats = {0, 0, 0};
424 413
425 iterate_all_torrents( torrent_statter, (uintptr_t)&stats ); 414 iterate_all_torrents(torrent_statter, (uintptr_t)&stats);
426 415
427 return sprintf( reply, "%llu\n%llu\nopentracker serving %llu torrents\nopentracker", 416 return sprintf(reply, "%llu\n%llu\nopentracker serving %llu torrents\nopentracker", stats.peer_count, stats.seed_count, stats.torrent_count);
428 stats.peer_count,
429 stats.seed_count,
430 stats.torrent_count
431 );
432} 417}
433 418
434static size_t stats_torrents_mrtg( char * reply ) 419static size_t stats_torrents_mrtg(char *reply) {
435{
436 size_t torrent_count = mutex_get_torrent_count(); 420 size_t torrent_count = mutex_get_torrent_count();
437 421
438 return sprintf( reply, "%zd\n%zd\nopentracker serving %zd torrents\nopentracker", 422 return sprintf(reply, "%zd\n%zd\nopentracker serving %zd torrents\nopentracker", torrent_count, (size_t)0, torrent_count);
439 torrent_count,
440 (size_t)0,
441 torrent_count
442 );
443} 423}
444 424
445static size_t stats_httperrors_txt ( char * reply ) { 425static size_t stats_httperrors_txt(char *reply) {
446 return sprintf( reply, "302 RED %llu\n400 ... %llu\n400 PAR %llu\n400 COM %llu\n403 IP %llu\n404 INV %llu\n500 SRV %llu\n", 426 return sprintf(reply, "302 RED %llu\n400 ... %llu\n400 PAR %llu\n400 COM %llu\n403 IP %llu\n404 INV %llu\n500 SRV %llu\n", ot_failed_request_counts[0],
447 ot_failed_request_counts[0], ot_failed_request_counts[1], ot_failed_request_counts[2], 427 ot_failed_request_counts[1], ot_failed_request_counts[2], ot_failed_request_counts[3], ot_failed_request_counts[4],
448 ot_failed_request_counts[3], ot_failed_request_counts[4], ot_failed_request_counts[5], 428 ot_failed_request_counts[5], ot_failed_request_counts[6]);
449 ot_failed_request_counts[6] );
450} 429}
451 430
452static size_t stats_return_renew_bucket( char * reply ) { 431static size_t stats_return_renew_bucket(char *reply) {
453 char *r = reply; 432 char *r = reply;
454 int i; 433 int i;
455 434
456 for( i=0; i<OT_PEER_TIMEOUT; ++i ) 435 for (i = 0; i < OT_PEER_TIMEOUT; ++i)
457 r+=sprintf(r,"%02i %llu\n", i, ot_renewed[i] ); 436 r += sprintf(r, "%02i %llu\n", i, ot_renewed[i]);
458 return r - reply; 437 return r - reply;
459} 438}
460 439
461static size_t stats_return_sync_mrtg( char * reply ) { 440static size_t stats_return_sync_mrtg(char *reply) {
462 ot_time t = time( NULL ) - ot_start_time; 441 ot_time t = time(NULL) - ot_start_time;
463 return sprintf( reply, 442 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", ot_overall_sync_count, 0LL, (int)t,
464 "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", 443 (int)(t / 3600), events_per_time(ot_overall_tcp_connections + ot_overall_udp_connections, t),
465 ot_overall_sync_count, 444 events_per_time(ot_overall_tcp_successfulannounces + ot_overall_udp_successfulannounces + ot_overall_udp_connects, t));
466 0LL,
467 (int)t,
468 (int)(t / 3600),
469 events_per_time( ot_overall_tcp_connections+ot_overall_udp_connections, t ),
470 events_per_time( ot_overall_tcp_successfulannounces+ot_overall_udp_successfulannounces+ot_overall_udp_connects, t )
471 );
472} 445}
473 446
474static size_t stats_return_completed_mrtg( char * reply ) { 447static size_t stats_return_completed_mrtg(char *reply) {
475 ot_time t = time( NULL ) - ot_start_time; 448 ot_time t = time(NULL) - ot_start_time;
476 449
477 return sprintf( reply, 450 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker, %lu completed/h.", ot_overall_completed, 0LL, (int)t, (int)(t / 3600),
478 "%llu\n%llu\n%i seconds (%i hours)\nopentracker, %lu completed/h.", 451 events_per_time(ot_overall_completed, t / 3600));
479 ot_overall_completed,
480 0LL,
481 (int)t,
482 (int)(t / 3600),
483 events_per_time( ot_overall_completed, t / 3600 )
484 );
485} 452}
486 453
487#ifdef WANT_LOG_NUMWANT 454#ifdef WANT_LOG_NUMWANT
488extern unsigned long long numwants[201]; 455extern unsigned long long numwants[201];
489static size_t stats_return_numwants( char * reply ) { 456static size_t stats_return_numwants(char *reply) {
490 char * r = reply; 457 char *r = reply;
491 int i; 458 int i;
492 for( i=0; i<=200; ++i ) 459 for (i = 0; i <= 200; ++i)
493 r += sprintf( r, "%03d => %lld\n", i, numwants[i] ); 460 r += sprintf(r, "%03d => %lld\n", i, numwants[i]);
494 return r-reply; 461 return r - reply;
495} 462}
496#endif 463#endif
497 464
498#ifdef WANT_FULLLOG_NETWORKS 465#ifdef WANT_FULLLOG_NETWORKS
499static void stats_return_fulllog( int *iovec_entries, struct iovec **iovector, char *r ) { 466static void stats_return_fulllog(int *iovec_entries, struct iovec **iovector, char *r) {
500 ot_log *loglist = g_logchain_first, *llnext; 467 ot_log *loglist = g_logchain_first, *llnext;
501 char * re = r + OT_STATS_TMPSIZE; 468 char *re = r + OT_STATS_TMPSIZE;
502 469
503 g_logchain_first = g_logchain_last = 0; 470 g_logchain_first = g_logchain_last = 0;
504 471
505 while( loglist ) { 472 while (loglist) {
506 if( r + ( loglist->size + 64 ) >= re ) { 473 if (r + (loglist->size + 64) >= re) {
507 r = iovec_fix_increase_or_free( iovec_entries, iovector, r, 32 * OT_STATS_TMPSIZE ); 474 r = iovec_fix_increase_or_free(iovec_entries, iovector, r, 32 * OT_STATS_TMPSIZE);
508 if( !r ) return; 475 if (!r)
476 return;
509 re = r + 32 * OT_STATS_TMPSIZE; 477 re = r + 32 * OT_STATS_TMPSIZE;
510 } 478 }
511 r += sprintf( r, "%08ld: ", loglist->time ); 479 r += sprintf(r, "%08ld: ", loglist->time);
512 r += fmt_ip6c( r, loglist->ip ); 480 r += fmt_ip6c(r, loglist->ip);
513 *r++ = '\n'; 481 *r++ = '\n';
514 memcpy( r, loglist->data, loglist->size ); 482 memcpy(r, loglist->data, loglist->size);
515 r += loglist->size; 483 r += loglist->size;
516 *r++ = '\n'; 484 *r++ = '\n';
517 *r++ = '*'; 485 *r++ = '*';
518 *r++ = '\n'; 486 *r++ = '\n';
519 *r++ = '\n'; 487 *r++ = '\n';
520 488
521 llnext = loglist->next; 489 llnext = loglist->next;
522 free( loglist->data ); 490 free(loglist->data);
523 free( loglist ); 491 free(loglist);
524 loglist = llnext; 492 loglist = llnext;
525 } 493 }
526 iovec_fixlast( iovec_entries, iovector, r ); 494 iovec_fixlast(iovec_entries, iovector, r);
527} 495}
528#endif 496#endif
529 497
530static size_t stats_return_everything( char * reply ) { 498static size_t stats_return_everything(char *reply) {
531 torrent_stats stats = {0,0,0}; 499 torrent_stats stats = {0, 0, 0};
532 int i; 500 int i;
533 char * r = reply; 501 char *r = reply;
534 502
535 iterate_all_torrents( torrent_statter, (uintptr_t)&stats ); 503 iterate_all_torrents(torrent_statter, (uintptr_t)&stats);
536 504
537 r += sprintf( r, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" ); 505 r += sprintf(r, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
538 r += sprintf( r, "<stats>\n" ); 506 r += sprintf(r, "<stats>\n");
539 r += sprintf( r, " <tracker_id>%" PRIu32 "</tracker_id>\n", g_tracker_id ); 507 r += sprintf(r, " <tracker_id>%" PRIu32 "</tracker_id>\n", g_tracker_id);
540 r += sprintf( r, " <version>\n" ); r += stats_return_tracker_version( r ); r += sprintf( r, " </version>\n" ); 508 r += sprintf(r, " <version>\n");
541 r += sprintf( r, " <uptime>%llu</uptime>\n", (unsigned long long)(time( NULL ) - ot_start_time) ); 509 r += stats_return_tracker_version(r);
542 r += sprintf( r, " <torrents>\n" ); 510 r += sprintf(r, " </version>\n");
543 r += sprintf( r, " <count_mutex>%zd</count_mutex>\n", mutex_get_torrent_count() ); 511 r += sprintf(r, " <uptime>%llu</uptime>\n", (unsigned long long)(time(NULL) - ot_start_time));
544 r += sprintf( r, " <count_iterator>%llu</count_iterator>\n", stats.torrent_count ); 512 r += sprintf(r, " <torrents>\n");
545 r += sprintf( r, " </torrents>\n" ); 513 r += sprintf(r, " <count_mutex>%zd</count_mutex>\n", mutex_get_torrent_count());
546 r += sprintf( r, " <peers>\n <count>%llu</count>\n </peers>\n", stats.peer_count ); 514 r += sprintf(r, " <count_iterator>%llu</count_iterator>\n", stats.torrent_count);
547 r += sprintf( r, " <seeds>\n <count>%llu</count>\n </seeds>\n", stats.seed_count ); 515 r += sprintf(r, " </torrents>\n");
548 r += sprintf( r, " <completed>\n <count>%llu</count>\n </completed>\n", ot_overall_completed ); 516 r += sprintf(r, " <peers>\n <count>%llu</count>\n </peers>\n", stats.peer_count);
549 r += sprintf( r, " <connections>\n" ); 517 r += sprintf(r, " <seeds>\n <count>%llu</count>\n </seeds>\n", stats.seed_count);
550 r += sprintf( r, " <tcp>\n <accept>%llu</accept>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n </tcp>\n", ot_overall_tcp_connections, ot_overall_tcp_successfulannounces, ot_overall_tcp_successfulscrapes ); 518 r += sprintf(r, " <completed>\n <count>%llu</count>\n </completed>\n", ot_overall_completed);
551 r += sprintf( r, " <udp>\n <overall>%llu</overall>\n <connect>%llu</connect>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n <missmatch>%llu</missmatch>\n </udp>\n", ot_overall_udp_connections, ot_overall_udp_connects, ot_overall_udp_successfulannounces, ot_overall_udp_successfulscrapes, ot_overall_udp_connectionidmissmatches ); 519 r += sprintf(r, " <connections>\n");
552 r += sprintf( r, " <livesync>\n <count>%llu</count>\n </livesync>\n", ot_overall_sync_count ); 520 r += sprintf(r, " <tcp>\n <accept>%llu</accept>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n </tcp>\n",
553 r += sprintf( r, " </connections>\n" ); 521 ot_overall_tcp_connections, ot_overall_tcp_successfulannounces, ot_overall_tcp_successfulscrapes);
554 r += sprintf( r, " <debug>\n" ); 522 r += sprintf(
555 r += sprintf( r, " <renew>\n" ); 523 r,
556 for( i=0; i<OT_PEER_TIMEOUT; ++i ) 524 " <udp>\n <overall>%llu</overall>\n <connect>%llu</connect>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n <missmatch>%llu</missmatch>\n </udp>\n",
557 r += sprintf( r, " <count interval=\"%02i\">%llu</count>\n", i, ot_renewed[i] ); 525 ot_overall_udp_connections, ot_overall_udp_connects, ot_overall_udp_successfulannounces, ot_overall_udp_successfulscrapes,
558 r += sprintf( r, " </renew>\n" ); 526 ot_overall_udp_connectionidmissmatches);
559 r += sprintf( r, " <http_error>\n" ); 527 r += sprintf(r, " <livesync>\n <count>%llu</count>\n </livesync>\n", ot_overall_sync_count);
560 for( i=0; i<CODE_HTTPERROR_COUNT; ++i ) 528 r += sprintf(r, " </connections>\n");
561 r += sprintf( r, " <count code=\"%s\">%llu</count>\n", ot_failed_request_names[i], ot_failed_request_counts[i] ); 529 r += sprintf(r, " <debug>\n");
562 r += sprintf( r, " </http_error>\n" ); 530 r += sprintf(r, " <renew>\n");
563 r += sprintf( r, " <mutex_stall>\n <count>%llu</count>\n </mutex_stall>\n", ot_overall_stall_count ); 531 for (i = 0; i < OT_PEER_TIMEOUT; ++i)
564 r += sprintf( r, " </debug>\n" ); 532 r += sprintf(r, " <count interval=\"%02i\">%llu</count>\n", i, ot_renewed[i]);
565 r += sprintf( r, "</stats>" ); 533 r += sprintf(r, " </renew>\n");
534 r += sprintf(r, " <http_error>\n");
535 for (i = 0; i < CODE_HTTPERROR_COUNT; ++i)
536 r += sprintf(r, " <count code=\"%s\">%llu</count>\n", ot_failed_request_names[i], ot_failed_request_counts[i]);
537 r += sprintf(r, " </http_error>\n");
538 r += sprintf(r, " <mutex_stall>\n <count>%llu</count>\n </mutex_stall>\n", ot_overall_stall_count);
539 r += sprintf(r, " </debug>\n");
540 r += sprintf(r, "</stats>");
566 return r - reply; 541 return r - reply;
567} 542}
568 543
569extern const char 544extern const char *g_version_opentracker_c, *g_version_accesslist_c, *g_version_clean_c, *g_version_fullscrape_c, *g_version_http_c, *g_version_iovec_c,
570*g_version_opentracker_c, *g_version_accesslist_c, *g_version_clean_c, *g_version_fullscrape_c, *g_version_http_c, 545 *g_version_mutex_c, *g_version_stats_c, *g_version_udp_c, *g_version_vector_c, *g_version_scan_urlencoded_query_c, *g_version_trackerlogic_c,
571*g_version_iovec_c, *g_version_mutex_c, *g_version_stats_c, *g_version_udp_c, *g_version_vector_c, 546 *g_version_livesync_c, *g_version_rijndael_c;
572*g_version_scan_urlencoded_query_c, *g_version_trackerlogic_c, *g_version_livesync_c, *g_version_rijndael_c; 547
573 548size_t stats_return_tracker_version(char *reply) {
574size_t stats_return_tracker_version( char *reply ) { 549 return sprintf(reply, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s", g_version_opentracker_c, g_version_accesslist_c, g_version_clean_c, g_version_fullscrape_c,
575 return sprintf( reply, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s", 550 g_version_http_c, g_version_iovec_c, g_version_mutex_c, g_version_stats_c, g_version_udp_c, g_version_vector_c,
576 g_version_opentracker_c, g_version_accesslist_c, g_version_clean_c, g_version_fullscrape_c, g_version_http_c, 551 g_version_scan_urlencoded_query_c, g_version_trackerlogic_c, g_version_livesync_c, g_version_rijndael_c);
577 g_version_iovec_c, g_version_mutex_c, g_version_stats_c, g_version_udp_c, g_version_vector_c, 552}
578 g_version_scan_urlencoded_query_c, g_version_trackerlogic_c, g_version_livesync_c, g_version_rijndael_c ); 553
579} 554size_t return_stats_for_tracker(char *reply, int mode, int format) {
580 555 (void)format;
581size_t return_stats_for_tracker( char *reply, int mode, int format ) { 556 switch (mode & TASK_TASK_MASK) {
582 (void) format; 557 case TASK_STATS_CONNS:
583 switch( mode & TASK_TASK_MASK ) { 558 return stats_connections_mrtg(reply);
584 case TASK_STATS_CONNS: 559 case TASK_STATS_SCRAPE:
585 return stats_connections_mrtg( reply ); 560 return stats_scrape_mrtg(reply);
586 case TASK_STATS_SCRAPE: 561 case TASK_STATS_UDP:
587 return stats_scrape_mrtg( reply ); 562 return stats_udpconnections_mrtg(reply);
588 case TASK_STATS_UDP: 563 case TASK_STATS_TCP:
589 return stats_udpconnections_mrtg( reply ); 564 return stats_tcpconnections_mrtg(reply);
590 case TASK_STATS_TCP: 565 case TASK_STATS_FULLSCRAPE:
591 return stats_tcpconnections_mrtg( reply ); 566 return stats_fullscrapes_mrtg(reply);
592 case TASK_STATS_FULLSCRAPE: 567 case TASK_STATS_COMPLETED:
593 return stats_fullscrapes_mrtg( reply ); 568 return stats_return_completed_mrtg(reply);
594 case TASK_STATS_COMPLETED: 569 case TASK_STATS_HTTPERRORS:
595 return stats_return_completed_mrtg( reply ); 570 return stats_httperrors_txt(reply);
596 case TASK_STATS_HTTPERRORS: 571 case TASK_STATS_VERSION:
597 return stats_httperrors_txt( reply ); 572 return stats_return_tracker_version(reply);
598 case TASK_STATS_VERSION: 573 case TASK_STATS_RENEW:
599 return stats_return_tracker_version( reply ); 574 return stats_return_renew_bucket(reply);
600 case TASK_STATS_RENEW: 575 case TASK_STATS_SYNCS:
601 return stats_return_renew_bucket( reply ); 576 return stats_return_sync_mrtg(reply);
602 case TASK_STATS_SYNCS:
603 return stats_return_sync_mrtg( reply );
604#ifdef WANT_LOG_NUMWANT 577#ifdef WANT_LOG_NUMWANT
605 case TASK_STATS_NUMWANTS: 578 case TASK_STATS_NUMWANTS:
606 return stats_return_numwants( reply ); 579 return stats_return_numwants(reply);
607#endif 580#endif
608 default: 581 default:
609 return 0; 582 return 0;
610 } 583 }
611} 584}
612 585
613static void stats_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ) { 586static void stats_make(int *iovec_entries, struct iovec **iovector, ot_tasktype mode) {
614 char *r; 587 char *r;
615 588
616 *iovec_entries = 0; 589 *iovec_entries = 0;
617 *iovector = NULL; 590 *iovector = NULL;
618 if( !( r = iovec_increase( iovec_entries, iovector, OT_STATS_TMPSIZE ) ) ) 591 if (!(r = iovec_increase(iovec_entries, iovector, OT_STATS_TMPSIZE)))
619 return; 592 return;
620 593
621 switch( mode & TASK_TASK_MASK ) { 594 switch (mode & TASK_TASK_MASK) {
622 case TASK_STATS_TORRENTS: r += stats_torrents_mrtg( r ); break; 595 case TASK_STATS_TORRENTS:
623 case TASK_STATS_PEERS: r += stats_peers_mrtg( r ); break; 596 r += stats_torrents_mrtg(r);
624 case TASK_STATS_SLASH24S: r += stats_slash24s_txt( r, 128 ); break; 597 break;
625 case TASK_STATS_TOP10: r += stats_top_txt( r, 10 ); break; 598 case TASK_STATS_PEERS:
626 case TASK_STATS_TOP100: 599 r += stats_peers_mrtg(r);
627 r = iovec_fix_increase_or_free( iovec_entries, iovector, r, 4 * OT_STATS_TMPSIZE ); 600 break;
628 if( !r ) return; 601 case TASK_STATS_SLASH24S:
629 r += stats_top_txt( r, 100 ); break; 602 r += stats_slash24s_txt(r, 128);
630 case TASK_STATS_EVERYTHING: r = iovec_fix_increase_or_free( iovec_entries, iovector, r, OT_STATS_TMPSIZE + 64 * OT_PEER_TIMEOUT ); 603 break;
631 if( !r ) return; 604 case TASK_STATS_TOP10:
632 r += stats_return_everything( r ); break; 605 r += stats_top_txt(r, 10);
606 break;
607 case TASK_STATS_TOP100:
608 r = iovec_fix_increase_or_free(iovec_entries, iovector, r, 4 * OT_STATS_TMPSIZE);
609 if (!r)
610 return;
611 r += stats_top_txt(r, 100);
612 break;
613 case TASK_STATS_EVERYTHING:
614 r = iovec_fix_increase_or_free(iovec_entries, iovector, r, OT_STATS_TMPSIZE + 64 * OT_PEER_TIMEOUT);
615 if (!r)
616 return;
617 r += stats_return_everything(r);
618 break;
633#ifdef WANT_SPOT_WOODPECKER 619#ifdef WANT_SPOT_WOODPECKER
634 case TASK_STATS_WOODPECKERS: r += stats_return_woodpeckers( r, 128 ); break; 620 case TASK_STATS_WOODPECKERS:
621 r += stats_return_woodpeckers(r, 128);
622 break;
635#endif 623#endif
636#ifdef WANT_FULLLOG_NETWORKS 624#ifdef WANT_FULLLOG_NETWORKS
637 case TASK_STATS_FULLLOG: stats_return_fulllog( iovec_entries, iovector, r ); 625 case TASK_STATS_FULLLOG:
638 return; 626 stats_return_fulllog(iovec_entries, iovector, r);
627 return;
639#endif 628#endif
640 default: 629 default:
641 iovec_free(iovec_entries, iovector); 630 iovec_free(iovec_entries, iovector);
642 return; 631 return;
643 } 632 }
644 iovec_fixlast( iovec_entries, iovector, r ); 633 iovec_fixlast(iovec_entries, iovector, r);
645} 634}
646 635
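stats_make() always starts from one OT_STATS_TMPSIZE chunk obtained via iovec_increase(); only the large reports (TOP100, EVERYTHING) grow the buffer with iovec_fix_increase_or_free() before any formatting happens, and a NULL return from either helper silently drops the task. The grow-then-render discipline as a sketch, with "render" standing in for any of the formatters above:

/* sketch: same buffer discipline as the TOP100/EVERYTHING cases above;
   "render" is a hypothetical stats_*() formatter returning bytes written */
static void render_large_report(int *iovec_entries, struct iovec **iovector, char *r,
                                size_t (*render)(char *)) {
  r = iovec_fix_increase_or_free(iovec_entries, iovector, r, 4 * OT_STATS_TMPSIZE);
  if (!r)
    return;                          /* allocation failed, reply is dropped */
  r += render(r);
  iovec_fixlast(iovec_entries, iovector, r);
}
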
647void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data ) { 636void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data) {
648 switch( event ) { 637 switch (event) {
649 case EVENT_ACCEPT: 638 case EVENT_ACCEPT:
650 if( proto == FLAG_TCP ) ot_overall_tcp_connections++; else ot_overall_udp_connections++; 639 if (proto == FLAG_TCP)
640 ot_overall_tcp_connections++;
641 else
642 ot_overall_udp_connections++;
651#ifdef WANT_LOG_NETWORKS 643#ifdef WANT_LOG_NETWORKS
652 stat_increase_network_count( &stats_network_counters_root, 0, event_data ); 644 stat_increase_network_count(&stats_network_counters_root, 0, event_data);
653#endif 645#endif
654 break; 646 break;
655 case EVENT_ANNOUNCE: 647 case EVENT_ANNOUNCE:
656 if( proto == FLAG_TCP ) ot_overall_tcp_successfulannounces++; else ot_overall_udp_successfulannounces++; 648 if (proto == FLAG_TCP)
657 break; 649 ot_overall_tcp_successfulannounces++;
658 case EVENT_CONNECT: 650 else
659 if( proto == FLAG_TCP ) ot_overall_tcp_connects++; else ot_overall_udp_connects++; 651 ot_overall_udp_successfulannounces++;
660 break; 652 break;
661 case EVENT_COMPLETED: 653 case EVENT_CONNECT:
654 if (proto == FLAG_TCP)
655 ot_overall_tcp_connects++;
656 else
657 ot_overall_udp_connects++;
658 break;
659 case EVENT_COMPLETED:
662#ifdef WANT_SYSLOGS 660#ifdef WANT_SYSLOGS
663 if( event_data) { 661 if (event_data) {
664 struct ot_workstruct *ws = (struct ot_workstruct *)event_data; 662 struct ot_workstruct *ws = (struct ot_workstruct *)event_data;
665 char timestring[64]; 663 char timestring[64];
666 char hash_hex[42], peerid_hex[42], ip_readable[64]; 664 char hash_hex[42], peerid_hex[42], ip_readable[64];
667 struct tm time_now; 665 struct tm time_now;
668 time_t ttt; 666 time_t ttt;
669 667
670 time( &ttt ); 668 time(&ttt);
671 localtime_r( &ttt, &time_now ); 669 localtime_r(&ttt, &time_now);
672 strftime( timestring, sizeof( timestring ), "%FT%T%z", &time_now ); 670 strftime(timestring, sizeof(timestring), "%FT%T%z", &time_now);
673 671
674 to_hex( hash_hex, *ws->hash ); 672 to_hex(hash_hex, *ws->hash);
675 if( ws->peer_id ) 673 if (ws->peer_id)
676 to_hex( peerid_hex, (uint8_t*)ws->peer_id ); 674 to_hex(peerid_hex, (uint8_t *)ws->peer_id);
677 else { 675 else {
678 *peerid_hex=0; 676 *peerid_hex = 0;
679 } 677 }
680 678
681 ip_readable[ fmt_ip6c( ip_readable, (char*)&ws->peer ) ] = 0; 679 ip_readable[fmt_ip6c(ip_readable, (char *)&ws->peer)] = 0;
682#if 0 680#if 0
683 /* XXX */ 681 /* XXX */
684 ip_readable[ fmt_ip4( ip_readable, (char*)&ws->peer ) ] = 0; 682 ip_readable[ fmt_ip4( ip_readable, (char*)&ws->peer ) ] = 0;
685#endif 683#endif
686 syslog( LOG_INFO, "time=%s event=completed info_hash=%s peer_id=%s ip=%s", timestring, hash_hex, peerid_hex, ip_readable ); 684 syslog(LOG_INFO, "time=%s event=completed info_hash=%s peer_id=%s ip=%s", timestring, hash_hex, peerid_hex, ip_readable);
687 }
688#endif
689 ot_overall_completed++;
690 break;
691 case EVENT_SCRAPE:
692 if( proto == FLAG_TCP ) ot_overall_tcp_successfulscrapes++; else ot_overall_udp_successfulscrapes++;
693 break;
694 case EVENT_FULLSCRAPE:
695 ot_full_scrape_count++;
696 ot_full_scrape_size += event_data;
697 break;
698 case EVENT_FULLSCRAPE_REQUEST:
699 {
700 ot_ip6 *ip = (ot_ip6*)event_data; /* ugly hack to transfer ip to stats */
701 char _debug[512];
702 int off = snprintf( _debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time)/60 );
703 off += fmt_ip6c( _debug+off, *ip );
704 off += snprintf( _debug+off, sizeof(_debug)-off, " - FULL SCRAPE\n" );
705 (void)write( 2, _debug, off );
706 ot_full_scrape_request_count++;
707 }
708 break;
709 case EVENT_FULLSCRAPE_REQUEST_GZIP:
710 {
711 ot_ip6 *ip = (ot_ip6*)event_data; /* ugly hack to transfer ip to stats */
712 char _debug[512];
713 int off = snprintf( _debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time)/60 );
714 off += fmt_ip6c(_debug+off, *ip );
715 off += snprintf( _debug+off, sizeof(_debug)-off, " - FULL SCRAPE\n" );
716 (void)write( 2, _debug, off );
717 ot_full_scrape_request_count++;
718 } 685 }
719 break; 686#endif
720 case EVENT_FAILED: 687 ot_overall_completed++;
721 ot_failed_request_counts[event_data]++; 688 break;
722 break; 689 case EVENT_SCRAPE:
723 case EVENT_RENEW: 690 if (proto == FLAG_TCP)
724 ot_renewed[event_data]++; 691 ot_overall_tcp_successfulscrapes++;
725 break; 692 else
726 case EVENT_SYNC: 693 ot_overall_udp_successfulscrapes++;
727 ot_overall_sync_count+=event_data; 694 break;
728 break; 695 case EVENT_FULLSCRAPE:
729 case EVENT_BUCKET_LOCKED: 696 ot_full_scrape_count++;
730 ot_overall_stall_count++; 697 ot_full_scrape_size += event_data;
731 break; 698 break;
699 case EVENT_FULLSCRAPE_REQUEST: {
700 ot_ip6 *ip = (ot_ip6 *)event_data; /* ugly hack to transfer ip to stats */
701 char _debug[512];
702 int off = snprintf(_debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time) / 60);
703 off += fmt_ip6c(_debug + off, *ip);
704 off += snprintf(_debug + off, sizeof(_debug) - off, " - FULL SCRAPE\n");
705 (void)write(2, _debug, off);
706 ot_full_scrape_request_count++;
707 } break;
708 case EVENT_FULLSCRAPE_REQUEST_GZIP: {
709 ot_ip6 *ip = (ot_ip6 *)event_data; /* ugly hack to transfer ip to stats */
710 char _debug[512];
711 int off = snprintf(_debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time) / 60);
712 off += fmt_ip6c(_debug + off, *ip);
713 off += snprintf(_debug + off, sizeof(_debug) - off, " - FULL SCRAPE\n");
714 (void)write(2, _debug, off);
715 ot_full_scrape_request_count++;
716 } break;
717 case EVENT_FAILED:
718 ot_failed_request_counts[event_data]++;
719 break;
720 case EVENT_RENEW:
721 ot_renewed[event_data]++;
722 break;
723 case EVENT_SYNC:
724 ot_overall_sync_count += event_data;
725 break;
726 case EVENT_BUCKET_LOCKED:
727 ot_overall_stall_count++;
728 break;
732#ifdef WANT_SPOT_WOODPECKER 729#ifdef WANT_SPOT_WOODPECKER
733 case EVENT_WOODPECKER: 730 case EVENT_WOODPECKER:
734 pthread_mutex_lock( &g_woodpeckers_mutex ); 731 pthread_mutex_lock(&g_woodpeckers_mutex);
735 stat_increase_network_count( &stats_woodpeckers_tree, 0, event_data ); 732 stat_increase_network_count(&stats_woodpeckers_tree, 0, event_data);
736 pthread_mutex_unlock( &g_woodpeckers_mutex ); 733 pthread_mutex_unlock(&g_woodpeckers_mutex);
737 break; 734 break;
738#endif 735#endif
739 case EVENT_CONNID_MISSMATCH: 736 case EVENT_CONNID_MISSMATCH:
740 ++ot_overall_udp_connectionidmissmatches; 737 ++ot_overall_udp_connectionidmissmatches;
741 default: 738 default:
742 break; 739 break;
743 } 740 }
744} 741}
745 742
746void stats_cleanup() { 743void stats_cleanup() {
747#ifdef WANT_SPOT_WOODPECKER 744#ifdef WANT_SPOT_WOODPECKER
748 pthread_mutex_lock( &g_woodpeckers_mutex ); 745 pthread_mutex_lock(&g_woodpeckers_mutex);
749 stats_shift_down_network_count( &stats_woodpeckers_tree, 0, 1 ); 746 stats_shift_down_network_count(&stats_woodpeckers_tree, 0, 1);
750 pthread_mutex_unlock( &g_woodpeckers_mutex ); 747 pthread_mutex_unlock(&g_woodpeckers_mutex);
751#endif 748#endif
752} 749}
753 750
754static void * stats_worker( void * args ) { 751static void *stats_worker(void *args) {
755 int iovec_entries; 752 int iovec_entries;
756 struct iovec *iovector; 753 struct iovec *iovector;
757 754
758 (void) args; 755 (void)args;
759 756
760 while( 1 ) { 757 while (1) {
761 ot_tasktype tasktype = TASK_STATS; 758 ot_tasktype tasktype = TASK_STATS;
762 ot_taskid taskid = mutex_workqueue_poptask( &tasktype ); 759 ot_taskid taskid = mutex_workqueue_poptask(&tasktype);
763 stats_make( &iovec_entries, &iovector, tasktype ); 760 stats_make(&iovec_entries, &iovector, tasktype);
764 if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) ) 761 if (mutex_workqueue_pushresult(taskid, iovec_entries, iovector))
765 iovec_free( &iovec_entries, &iovector ); 762 iovec_free(&iovec_entries, &iovector);
766 } 763 }
767 return NULL; 764 return NULL;
768} 765}
769 766
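stats_deliver() is fire and forget: it only queues the socket and the task type, the single thread started in stats_init() renders the report, and mutex_workqueue_pushresult() hands the iovecs back to the networking code, with the worker freeing them itself if that push fails. From the caller's side a request is a one-liner (socket name hypothetical):

/* sketch: request the full XML report, the reply reaches client_sock later */
stats_deliver(client_sock, TASK_STATS_EVERYTHING);
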
770void stats_deliver( int64 sock, int tasktype ) { 767void stats_deliver(int64 sock, int tasktype) { mutex_workqueue_pushtask(sock, tasktype); }
771 mutex_workqueue_pushtask( sock, tasktype );
772}
773 768
774static pthread_t thread_id; 769static pthread_t thread_id;
775void stats_init( ) { 770void stats_init() {
776 ot_start_time = g_now_seconds; 771 ot_start_time = g_now_seconds;
777 pthread_create( &thread_id, NULL, stats_worker, NULL ); 772 pthread_create(&thread_id, NULL, stats_worker, NULL);
778} 773}
779 774
780void stats_deinit( ) { 775void stats_deinit() {
781 pthread_cancel( thread_id ); 776 pthread_cancel(thread_id);
782} 777}
783 778
784const char *g_version_stats_c = "$Source$: $Revision$\n"; 779const char *g_version_stats_c = "$Source$: $Revision$\n";
diff --git a/ot_stats.h b/ot_stats.h
index ed60f68..a354c19 100644
--- a/ot_stats.h
+++ b/ot_stats.h
@@ -6,10 +6,12 @@
6#ifndef OT_STATS_H__ 6#ifndef OT_STATS_H__
7#define OT_STATS_H__ 7#define OT_STATS_H__
8 8
9#include "trackerlogic.h"
10
9typedef enum { 11typedef enum {
10 EVENT_ACCEPT, 12 EVENT_ACCEPT,
11 EVENT_READ, 13 EVENT_READ,
12 EVENT_CONNECT, /* UDP only */ 14 EVENT_CONNECT, /* UDP only */
13 EVENT_ANNOUNCE, 15 EVENT_ANNOUNCE,
14 EVENT_COMPLETED, 16 EVENT_COMPLETED,
15 EVENT_RENEW, 17 EVENT_RENEW,
@@ -17,7 +19,7 @@ typedef enum {
17 EVENT_SCRAPE, 19 EVENT_SCRAPE,
18 EVENT_FULLSCRAPE_REQUEST, 20 EVENT_FULLSCRAPE_REQUEST,
19 EVENT_FULLSCRAPE_REQUEST_GZIP, 21 EVENT_FULLSCRAPE_REQUEST_GZIP,
20 EVENT_FULLSCRAPE, /* TCP only */ 22 EVENT_FULLSCRAPE, /* TCP only */
21 EVENT_FAILED, 23 EVENT_FAILED,
22 EVENT_BUCKET_LOCKED, 24 EVENT_BUCKET_LOCKED,
23 EVENT_WOODPECKER, 25 EVENT_WOODPECKER,
@@ -38,13 +40,13 @@ enum {
38 CODE_HTTPERROR_COUNT 40 CODE_HTTPERROR_COUNT
39}; 41};
40 42
41void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data ); 43void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data);
42void stats_deliver( int64 sock, int tasktype ); 44void stats_deliver(int64 sock, int tasktype);
43void stats_cleanup( void ); 45void stats_cleanup(void);
44size_t return_stats_for_tracker( char *reply, int mode, int format ); 46size_t return_stats_for_tracker(char *reply, int mode, int format);
45size_t stats_return_tracker_version( char *reply ); 47size_t stats_return_tracker_version(char *reply);
46void stats_init( void ); 48void stats_init(void);
47void stats_deinit( void ); 49void stats_deinit(void);
48 50
49extern const char *g_version_rijndael_c; 51extern const char *g_version_rijndael_c;
50extern const char *g_version_livesync_c; 52extern const char *g_version_livesync_c;
diff --git a/ot_udp.c b/ot_udp.c
index c32a7e2..912c7e4 100644
--- a/ot_udp.c
+++ b/ot_udp.c
@@ -4,31 +4,31 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <stdlib.h>
8#include <pthread.h>
9#include <string.h>
10#include <arpa/inet.h> 7#include <arpa/inet.h>
8#include <pthread.h>
11#include <stdio.h> 9#include <stdio.h>
10#include <stdlib.h>
11#include <string.h>
12 12
13/* Libowfat */ 13/* Libowfat */
14#include "socket.h"
15#include "io.h" 14#include "io.h"
16#include "ip6.h" 15#include "ip6.h"
16#include "socket.h"
17 17
18/* Opentracker */ 18/* Opentracker */
19#include "trackerlogic.h"
20#include "ot_udp.h"
21#include "ot_stats.h"
22#include "ot_rijndael.h" 19#include "ot_rijndael.h"
20#include "ot_stats.h"
21#include "ot_udp.h"
22#include "trackerlogic.h"
23 23
24#if 0 24#if 0
25static const uint8_t g_static_connid[8] = { 0x23, 0x42, 0x05, 0x17, 0xde, 0x41, 0x50, 0xff }; 25static const uint8_t g_static_connid[8] = { 0x23, 0x42, 0x05, 0x17, 0xde, 0x41, 0x50, 0xff };
26#endif 26#endif
27static uint32_t g_rijndael_round_key[44] = {0}; 27static uint32_t g_rijndael_round_key[44] = {0};
28static uint32_t g_key_of_the_hour[2] = {0}; 28static uint32_t g_key_of_the_hour[2] = {0};
29static ot_time g_hour_of_the_key; 29static ot_time g_hour_of_the_key;
30 30
31static void udp_generate_rijndael_round_key() { 31static void udp_generate_rijndael_round_key() {
32 uint32_t key[16]; 32 uint32_t key[16];
33#ifdef WANT_ARC4RANDOM 33#ifdef WANT_ARC4RANDOM
34 arc4random_buf(&key[0], sizeof(key)); 34 arc4random_buf(&key[0], sizeof(key));
@@ -38,7 +38,7 @@ static void udp_generate_rijndael_round_key() {
38 key[2] = random(); 38 key[2] = random();
39 key[3] = random(); 39 key[3] = random();
40#endif 40#endif
41 rijndaelKeySetupEnc128( g_rijndael_round_key, (uint8_t*)key ); 41 rijndaelKeySetupEnc128(g_rijndael_round_key, (uint8_t *)key);
42 42
43#ifdef WANT_ARC4RANDOM 43#ifdef WANT_ARC4RANDOM
44 g_key_of_the_hour[0] = arc4random(); 44 g_key_of_the_hour[0] = arc4random();
@@ -49,181 +49,190 @@ static void udp_generate_rijndael_round_key() {
49} 49}
50 50
51/* Generate current and previous connection id for ip */ 51/* Generate current and previous connection id for ip */
52static void udp_make_connectionid( uint32_t connid[2], const ot_ip6 remoteip, int age ) { 52static void udp_make_connectionid(uint32_t connid[2], const ot_ip6 remoteip, int age) {
53 uint32_t plain[4], crypt[4]; 53 uint32_t plain[4], crypt[4];
54 int i; 54 int i;
55 if( g_now_minutes + 60 > g_hour_of_the_key ) { 55 if (g_now_minutes + 60 > g_hour_of_the_key) {
56 g_hour_of_the_key = g_now_minutes; 56 g_hour_of_the_key = g_now_minutes;
57 g_key_of_the_hour[1] = g_key_of_the_hour[0]; 57 g_key_of_the_hour[1] = g_key_of_the_hour[0];
58#ifdef WANT_ARC4RANDOM 58#ifdef WANT_ARC4RANDOM
59 g_key_of_the_hour[0] = arc4random(); 59 g_key_of_the_hour[0] = arc4random();
60#else 60#else
61 g_key_of_the_hour[0] = random(); 61 g_key_of_the_hour[0] = random();
62#endif 62#endif
63 } 63 }
64 64
65 memcpy( plain, remoteip, sizeof( plain ) ); 65 memcpy(plain, remoteip, sizeof(plain));
66 for( i=0; i<4; ++i ) plain[i] ^= g_key_of_the_hour[age]; 66 for (i = 0; i < 4; ++i)
67 rijndaelEncrypt128( g_rijndael_round_key, (uint8_t*)remoteip, (uint8_t*)crypt ); 67 plain[i] ^= g_key_of_the_hour[age];
68 rijndaelEncrypt128(g_rijndael_round_key, (uint8_t *)remoteip, (uint8_t *)crypt);
68 connid[0] = crypt[0] ^ crypt[1]; 69 connid[0] = crypt[0] ^ crypt[1];
69 connid[1] = crypt[2] ^ crypt[3]; 70 connid[1] = crypt[2] ^ crypt[3];
70} 71}
71 72
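udp_make_connectionid() derives the connection id from the requester's address alone: the IP is run through the per-process Rijndael round key and the 128-bit result is folded into 64 bits, while the hourly key pair (age 0 for the current hour, age 1 for the previous one) is intended to bound how long a captured id stays valid. handle_udp6() below therefore tries age 0 first and only then falls back to age 1. That check as a small sketch:

/* sketch of the two-step check handle_udp6() performs on non-connect packets */
static int connid_is_valid(const uint32_t echoed[2], const ot_ip6 remoteip) {
  uint32_t connid[2];
  udp_make_connectionid(connid, remoteip, 0);            /* this hour's id     */
  if (echoed[0] == connid[0] && echoed[1] == connid[1])
    return 1;
  udp_make_connectionid(connid, remoteip, 1);            /* previous hour's id */
  return echoed[0] == connid[0] && echoed[1] == connid[1];
}
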
72/* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */ 73/* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */
73int handle_udp6( int64 serversocket, struct ot_workstruct *ws ) { 74int handle_udp6(int64 serversocket, struct ot_workstruct *ws) {
74 ot_ip6 remoteip; 75 ot_ip6 remoteip;
75 uint32_t *inpacket = (uint32_t*)ws->inbuf; 76 uint32_t *inpacket = (uint32_t *)ws->inbuf;
76 uint32_t *outpacket = (uint32_t*)ws->outbuf; 77 uint32_t *outpacket = (uint32_t *)ws->outbuf;
77 uint32_t left, event, scopeid; 78 uint32_t left, event, scopeid;
78 uint32_t connid[2]; 79 uint32_t connid[2];
79 uint32_t action; 80 uint32_t action;
80 uint16_t port, remoteport; 81 uint16_t port, remoteport;
81 size_t byte_count, scrape_count; 82 size_t byte_count, scrape_count;
82 83
83 byte_count = socket_recv6( serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid ); 84 byte_count = socket_recv6(serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid);
84 if( !byte_count ) return 0; 85 if (!byte_count)
85 86 return 0;
86 stats_issue_event( EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip ); 87
87 stats_issue_event( EVENT_READ, FLAG_UDP, byte_count ); 88 stats_issue_event(EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip);
89 stats_issue_event(EVENT_READ, FLAG_UDP, byte_count);
88 90
89 /* Minimum udp tracker packet size, also catches error */ 91 /* Minimum udp tracker packet size, also catches error */
90 if( byte_count < 16 ) 92 if (byte_count < 16)
91 return 1; 93 return 1;
92 94
93 /* Get action to take. Ignore error messages and broken packets */ 95 /* Get action to take. Ignore error messages and broken packets */
94 action = ntohl( inpacket[2] ); 96 action = ntohl(inpacket[2]);
95 if( action > 2 ) 97 if (action > 2)
96 return 1; 98 return 1;
97 99
98 /* Generate the connection id we give out and expect to and from 100 /* Generate the connection id we give out and expect to and from
99 the requesting ip address, this prevents udp spoofing */ 101 the requesting ip address, this prevents udp spoofing */
100 udp_make_connectionid( connid, remoteip, 0 ); 102 udp_make_connectionid(connid, remoteip, 0);
101 103
102 /* Initialise hash pointer */ 104 /* Initialise hash pointer */
103 ws->hash = NULL; 105 ws->hash = NULL;
104 ws->peer_id = NULL; 106 ws->peer_id = NULL;
105 107
106 /* If action is not 0 (connect), then we expect the derived 108 /* If action is not 0 (connect), then we expect the derived
107 connection id in first 64 bit */ 109 connection id in first 64 bit */
108 if( ( action > 0 ) && ( inpacket[0] != connid[0] || inpacket[1] != connid[1] ) ) { 110 if ((action > 0) && (inpacket[0] != connid[0] || inpacket[1] != connid[1])) {
109 /* If connection id does not match, try the one that was 111 /* If connection id does not match, try the one that was
110 valid in the previous hour. Only if this also does not 112 valid in the previous hour. Only if this also does not
111 match, return an error packet */ 113 match, return an error packet */
112 udp_make_connectionid( connid, remoteip, 1 ); 114 udp_make_connectionid(connid, remoteip, 1);
113 if( inpacket[0] != connid[0] || inpacket[1] != connid[1] ) { 115 if (inpacket[0] != connid[0] || inpacket[1] != connid[1]) {
114 const size_t s = sizeof( "Connection ID missmatch." ); 116 const size_t s = sizeof("Connection ID missmatch.");
115 outpacket[0] = htonl( 3 ); outpacket[1] = inpacket[3]; 117 outpacket[0] = htonl(3);
116 memcpy( &outpacket[2], "Connection ID missmatch.", s ); 118 outpacket[1] = inpacket[3];
117 socket_send6( serversocket, ws->outbuf, 8 + s, remoteip, remoteport, 0 ); 119 memcpy(&outpacket[2], "Connection ID missmatch.", s);
118 stats_issue_event( EVENT_CONNID_MISSMATCH, FLAG_UDP, 8 + s ); 120 socket_send6(serversocket, ws->outbuf, 8 + s, remoteip, remoteport, 0);
121 stats_issue_event(EVENT_CONNID_MISSMATCH, FLAG_UDP, 8 + s);
119 return 1; 122 return 1;
120 } 123 }
121 } 124 }
122 125
123 switch( action ) { 126 switch (action) {
124 case 0: /* This is a connect action */ 127 case 0: /* This is a connect action */
125 /* look for udp bittorrent magic id */ 128 /* look for udp bittorrent magic id */
126 if( (ntohl(inpacket[0]) != 0x00000417) || (ntohl(inpacket[1]) != 0x27101980) ) 129 if ((ntohl(inpacket[0]) != 0x00000417) || (ntohl(inpacket[1]) != 0x27101980))
127 return 1; 130 return 1;
131
132 outpacket[0] = 0;
133 outpacket[1] = inpacket[3];
134 outpacket[2] = connid[0];
135 outpacket[3] = connid[1];
136
137 socket_send6(serversocket, ws->outbuf, 16, remoteip, remoteport, 0);
138 stats_issue_event(EVENT_CONNECT, FLAG_UDP, 16);
139 break;
140 case 1: /* This is an announce action */
141 /* Minimum udp announce packet size */
142 if (byte_count < 98)
143 return 1;
144
145 /* We do only want to know, if it is zero */
146 left = inpacket[64 / 4] | inpacket[68 / 4];
147
148 event = ntohl(inpacket[80 / 4]);
149 port = *(uint16_t *)(((char *)inpacket) + 96);
150 ws->hash = (ot_hash *)(((char *)inpacket) + 16);
128 151
129 outpacket[0] = 0; 152 OT_SETIP(ws->peer, remoteip);
130 outpacket[1] = inpacket[3]; 153 OT_SETPORT(ws->peer, &port);
131 outpacket[2] = connid[0]; 154 OT_PEERFLAG(ws->peer) = 0;
132 outpacket[3] = connid[1];
133 155
134 socket_send6( serversocket, ws->outbuf, 16, remoteip, remoteport, 0 ); 156 switch (event) {
135 stats_issue_event( EVENT_CONNECT, FLAG_UDP, 16 ); 157 case 1:
158 OT_PEERFLAG(ws->peer) |= PEER_FLAG_COMPLETED;
136 break; 159 break;
137 case 1: /* This is an announce action */ 160 case 3:
138 /* Minimum udp announce packet size */ 161 OT_PEERFLAG(ws->peer) |= PEER_FLAG_STOPPED;
139 if( byte_count < 98 )
140 return 1;
141
142 /* We do only want to know, if it is zero */
143 left = inpacket[64/4] | inpacket[68/4];
144
145 event = ntohl( inpacket[80/4] );
146 port = *(uint16_t*)( ((char*)inpacket) + 96 );
147 ws->hash = (ot_hash*)( ((char*)inpacket) + 16 );
148
149 OT_SETIP( ws->peer, remoteip );
150 OT_SETPORT( ws->peer, &port );
151 OT_PEERFLAG( ws->peer ) = 0;
152
153 switch( event ) {
154 case 1: OT_PEERFLAG( ws->peer ) |= PEER_FLAG_COMPLETED; break;
155 case 3: OT_PEERFLAG( ws->peer ) |= PEER_FLAG_STOPPED; break;
156 default: break;
157 }
158
159 if( !left )
160 OT_PEERFLAG( ws->peer ) |= PEER_FLAG_SEEDING;
161
162 outpacket[0] = htonl( 1 ); /* announce action */
163 outpacket[1] = inpacket[12/4];
164
165 if( OT_PEERFLAG( ws->peer ) & PEER_FLAG_STOPPED ) { /* Peer is gone. */
166 ws->reply = ws->outbuf;
167 ws->reply_size = remove_peer_from_torrent( FLAG_UDP, ws );
168 } else {
169 /* Limit amount of peers to OT_MAX_PEERS_UDP */
170 uint32_t numwant = ntohl( inpacket[92/4] );
171 size_t max_peers = ip6_isv4mapped(remoteip) ? OT_MAX_PEERS_UDP4 : OT_MAX_PEERS_UDP6;
172 if (numwant > max_peers) numwant = max_peers;
173
174 ws->reply = ws->outbuf + 8;
175 ws->reply_size = 8 + add_peer_to_torrent_and_return_peers( FLAG_UDP, ws, numwant );
176 }
177
178 socket_send6( serversocket, ws->outbuf, ws->reply_size, remoteip, remoteport, 0 );
179 stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, ws->reply_size );
180 break; 162 break;
163 default:
164 break;
165 }
181 166
182 case 2: /* This is a scrape action */ 167 if (!left)
183 outpacket[0] = htonl( 2 ); /* scrape action */ 168 OT_PEERFLAG(ws->peer) |= PEER_FLAG_SEEDING;
184 outpacket[1] = inpacket[12/4];
185 169
186 for( scrape_count = 0; ( scrape_count * 20 < byte_count - 16) && ( scrape_count <= 74 ); scrape_count++ ) 170 outpacket[0] = htonl(1); /* announce action */
187 return_udp_scrape_for_torrent( *(ot_hash*)( ((char*)inpacket) + 16 + 20 * scrape_count ), ((char*)outpacket) + 8 + 12 * scrape_count ); 171 outpacket[1] = inpacket[12 / 4];
188 172
189 socket_send6( serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0 ); 173 if (OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED) { /* Peer is gone. */
190 stats_issue_event( EVENT_SCRAPE, FLAG_UDP, scrape_count ); 174 ws->reply = ws->outbuf;
191 break; 175 ws->reply_size = remove_peer_from_torrent(FLAG_UDP, ws);
176 } else {
177 /* Limit amount of peers to OT_MAX_PEERS_UDP */
178 uint32_t numwant = ntohl(inpacket[92 / 4]);
179 size_t max_peers = ip6_isv4mapped(remoteip) ? OT_MAX_PEERS_UDP4 : OT_MAX_PEERS_UDP6;
180 if (numwant > max_peers)
181 numwant = max_peers;
182
183 ws->reply = ws->outbuf + 8;
184 ws->reply_size = 8 + add_peer_to_torrent_and_return_peers(FLAG_UDP, ws, numwant);
185 }
186
187 socket_send6(serversocket, ws->outbuf, ws->reply_size, remoteip, remoteport, 0);
188 stats_issue_event(EVENT_ANNOUNCE, FLAG_UDP, ws->reply_size);
189 break;
190
191 case 2: /* This is a scrape action */
192 outpacket[0] = htonl(2); /* scrape action */
193 outpacket[1] = inpacket[12 / 4];
194
195 for (scrape_count = 0; (scrape_count * 20 < byte_count - 16) && (scrape_count <= 74); scrape_count++)
196 return_udp_scrape_for_torrent(*(ot_hash *)(((char *)inpacket) + 16 + 20 * scrape_count), ((char *)outpacket) + 8 + 12 * scrape_count);
197
198 socket_send6(serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0);
199 stats_issue_event(EVENT_SCRAPE, FLAG_UDP, scrape_count);
200 break;
192 } 201 }
193 return 1; 202 return 1;
194} 203}
195 204
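For reference, the packet layout handle_udp6() expects follows the UDP tracker protocol linked above: a 16-byte header of 64-bit connection id, 32-bit action and 32-bit transaction id, where a connect (action 0) carries the protocol magic 0x41727101980 in place of a connection id, an announce (action 1) is at least 98 bytes, and a scrape (action 2) appends 20-byte info_hashes after the header. A client-side connect request matching those checks, as a sketch:

/* sketch: build the 16-byte connect request this handler accepts */
static void build_connect_request(uint32_t out[4], uint32_t transaction_id) {
  out[0] = htonl(0x00000417);   /* high half of the protocol magic 0x41727101980 */
  out[1] = htonl(0x27101980);   /* low half of the magic                         */
  out[2] = htonl(0);            /* action 0 = connect                            */
  out[3] = transaction_id;      /* opaque, echoed back in outpacket[1]           */
}
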
196static void* udp_worker( void * args ) { 205static void *udp_worker(void *args) {
197 int64 sock = (int64)args; 206 int64 sock = (int64)args;
198 struct ot_workstruct ws; 207 struct ot_workstruct ws;
199 memset( &ws, 0, sizeof(ws) ); 208 memset(&ws, 0, sizeof(ws));
200 209
201 ws.inbuf=malloc(G_INBUF_SIZE); 210 ws.inbuf = malloc(G_INBUF_SIZE);
202 ws.outbuf=malloc(G_OUTBUF_SIZE); 211 ws.outbuf = malloc(G_OUTBUF_SIZE);
203#ifdef _DEBUG_HTTPERROR 212#ifdef _DEBUG_HTTPERROR
204 ws.debugbuf=malloc(G_DEBUGBUF_SIZE); 213 ws.debugbuf = malloc(G_DEBUGBUF_SIZE);
205#endif 214#endif
206 215
207 while( g_opentracker_running ) 216 while (g_opentracker_running)
208 handle_udp6( sock, &ws ); 217 handle_udp6(sock, &ws);
209 218
210 free( ws.inbuf ); 219 free(ws.inbuf);
211 free( ws.outbuf ); 220 free(ws.outbuf);
212#ifdef _DEBUG_HTTPERROR 221#ifdef _DEBUG_HTTPERROR
213 free( ws.debugbuf ); 222 free(ws.debugbuf);
214#endif 223#endif
215 return NULL; 224 return NULL;
216} 225}
217 226
218void udp_init( int64 sock, unsigned int worker_count ) { 227void udp_init(int64 sock, unsigned int worker_count) {
219 pthread_t thread_id; 228 pthread_t thread_id;
220 if( !g_rijndael_round_key[0] ) 229 if (!g_rijndael_round_key[0])
221 udp_generate_rijndael_round_key(); 230 udp_generate_rijndael_round_key();
222#ifdef _DEBUG 231#ifdef _DEBUG
223 fprintf( stderr, " installing %d workers on udp socket %ld\n", worker_count, (unsigned long)sock ); 232 fprintf(stderr, " installing %d workers on udp socket %ld\n", worker_count, (unsigned long)sock);
224#endif 233#endif
225 while( worker_count-- ) 234 while (worker_count--)
226 pthread_create( &thread_id, NULL, udp_worker, (void *)sock ); 235 pthread_create(&thread_id, NULL, udp_worker, (void *)sock);
227} 236}
228 237
229const char *g_version_udp_c = "$Source$: $Revision$\n"; 238const char *g_version_udp_c = "$Source$: $Revision$\n";
diff --git a/ot_udp.h b/ot_udp.h
index 974c727..a71a3d9 100644
--- a/ot_udp.h
+++ b/ot_udp.h
@@ -6,7 +6,7 @@
6#ifndef OT_UDP_H__ 6#ifndef OT_UDP_H__
7#define OT_UDP_H__ 7#define OT_UDP_H__
8 8
9void udp_init( int64 sock, unsigned int worker_count ); 9void udp_init(int64 sock, unsigned int worker_count);
10int handle_udp6( int64 serversocket, struct ot_workstruct *ws ); 10int handle_udp6(int64 serversocket, struct ot_workstruct *ws);
11 11
12#endif 12#endif
diff --git a/ot_vector.c b/ot_vector.c
index 479e832..744306f 100644
--- a/ot_vector.c
+++ b/ot_vector.c
@@ -4,43 +4,37 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <stddef.h>
8#include <stdint.h>
7#include <stdlib.h> 9#include <stdlib.h>
8#include <string.h> 10#include <string.h>
9#include <strings.h> 11#include <strings.h>
10#include <stdint.h>
11#include <stddef.h>
12 12
13/* Opentracker */ 13/* Opentracker */
14#include "trackerlogic.h" 14#include "trackerlogic.h"
15#include "ot_vector.h"
16 15
17/* Libowfat */ 16/* Libowfat */
18#include "uint32.h"
19#include "uint16.h" 17#include "uint16.h"
18#include "uint32.h"
20 19
21static int vector_compare_peer6(const void *peer1, const void *peer2 ) { 20static int vector_compare_peer6(const void *peer1, const void *peer2) { return memcmp(peer1, peer2, OT_PEER_COMPARE_SIZE6); }
22 return memcmp( peer1, peer2, OT_PEER_COMPARE_SIZE6 ); 21static int vector_compare_peer4(const void *peer1, const void *peer2) { return memcmp(peer1, peer2, OT_PEER_COMPARE_SIZE4); }
23}
24static int vector_compare_peer4(const void *peer1, const void *peer2 ) {
25 return memcmp( peer1, peer2, OT_PEER_COMPARE_SIZE4 );
26}
27 22
28/* This function gives us a binary search that returns a pointer, even if 23/* This function gives us a binary search that returns a pointer, even if
29 no exact match is found. In that case it sets exactmatch 0 and gives 24 no exact match is found. In that case it sets exactmatch 0 and gives
30 calling functions the chance to insert data 25 calling functions the chance to insert data
31*/ 26*/
32void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size, 27void *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch) {
33 size_t compare_size, int *exactmatch ) {
34 size_t interval = member_count; 28 size_t interval = member_count;
35 29
36 while( interval ) { 30 while (interval) {
37 uint8_t *lookat = ((uint8_t*)base) + member_size * ( interval / 2 ); 31 uint8_t *lookat = ((uint8_t *)base) + member_size * (interval / 2);
38 int cmp = memcmp( lookat, key, compare_size ); 32 int cmp = memcmp(lookat, key, compare_size);
39 if(cmp == 0 ) { 33 if (cmp == 0) {
40 base = lookat; 34 base = lookat;
41 break; 35 break;
42 } 36 }
43 if(cmp < 0) { 37 if (cmp < 0) {
44 base = lookat + member_size; 38 base = lookat + member_size;
45 interval--; 39 interval--;
46 } 40 }
@@ -48,13 +42,14 @@ void *binary_search( const void * const key, const void * base, const size_t mem
48 } 42 }
49 43
50 *exactmatch = interval; 44 *exactmatch = interval;
51 return (void*)base; 45 return (void *)base;
52} 46}
53 47
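Unlike bsearch(), the binary_search() above never returns NULL: on a miss it clears *exactmatch and returns the position where the key would have to be inserted to keep the array sorted, which is what the insert helpers below rely on. A usage sketch on one-byte keys, so memcmp() order and numeric order coincide:

/* sketch: miss case, slot ends up pointing at the would-be insertion point */
int      exactmatch;
uint8_t  keys[] = {1, 3, 5, 7};          /* already sorted by memcmp()       */
uint8_t  key    = 4;
uint8_t *slot   = binary_search(&key, keys, 4, sizeof(uint8_t), sizeof(uint8_t), &exactmatch);
/* here exactmatch == 0 and slot == &keys[2], the slot the value 4 would take */
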
54static uint8_t vector_hash_peer( ot_peer const *peer, size_t compare_size, int bucket_count ) { 48static uint8_t vector_hash_peer(ot_peer const *peer, size_t compare_size, int bucket_count) {
55 unsigned int hash = 5381; 49 unsigned int hash = 5381;
56 uint8_t *p = (uint8_t*)peer; 50 uint8_t *p = (uint8_t *)peer;
57 while( compare_size-- ) hash += (hash<<5) + *(p++); 51 while (compare_size--)
52 hash += (hash << 5) + *(p++);
58 return hash % bucket_count; 53 return hash % bucket_count;
59} 54}
60 55
@@ -65,58 +60,62 @@ static uint8_t vector_hash_peer( ot_peer const *peer, size_t compare_size, int b
65 if it wasn't found in vector. Caller needs to check the passed "exactmatch" variable to see, whether an insert 60 if it wasn't found in vector. Caller needs to check the passed "exactmatch" variable to see, whether an insert
66 took place. If resizing the vector failed, NULL is returned, else the pointer to the object in vector. 61 took place. If resizing the vector failed, NULL is returned, else the pointer to the object in vector.
67*/ 62*/
68void *vector_find_or_insert( ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch ) { 63void *vector_find_or_insert(ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch) {
69 uint8_t *match = binary_search( key, vector->data, vector->size, member_size, compare_size, exactmatch ); 64 uint8_t *match = binary_search(key, vector->data, vector->size, member_size, compare_size, exactmatch);
70 65
71 if( *exactmatch ) return match; 66 if (*exactmatch)
67 return match;
72 68
73 if( vector->size + 1 > vector->space ) { 69 if (vector->size + 1 > vector->space) {
74 size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS; 70 size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS;
75 uint8_t *new_data = realloc( vector->data, new_space * member_size ); 71 uint8_t *new_data = realloc(vector->data, new_space * member_size);
76 if( !new_data ) return NULL; 72 if (!new_data)
73 return NULL;
77 /* Adjust pointer if it moved by realloc */ 74 /* Adjust pointer if it moved by realloc */
78 match = new_data + (match - (uint8_t*)vector->data); 75 match = new_data + (match - (uint8_t *)vector->data);
79 76
80 vector->data = new_data; 77 vector->data = new_data;
81 vector->space = new_space; 78 vector->space = new_space;
82 } 79 }
83 memmove( match + member_size, match, ((uint8_t*)vector->data) + member_size * vector->size - match ); 80 memmove(match + member_size, match, ((uint8_t *)vector->data) + member_size * vector->size - match);
84 81
85 vector->size++; 82 vector->size++;
86 return match; 83 return match;
87} 84}
88 85
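Callers treat vector_find_or_insert() as get-or-reserve: a non-NULL return with *exactmatch set means the object was already there, a non-NULL return with *exactmatch clear is a freshly reserved but still uninitialised slot, and NULL means realloc() failed and the vector is unchanged. A sketch of that convention with a made-up record type keyed by its leading ot_hash, similar to how trackerlogic.c stores torrents:

typedef struct { ot_hash id; int payload; } entry_t;  /* made-up record type */
static ot_vector my_vector;                           /* made-up vector      */

static entry_t *get_or_create(ot_hash *id) {
  int      exactmatch;
  entry_t *slot = vector_find_or_insert(&my_vector, id, sizeof(entry_t), sizeof(ot_hash), &exactmatch);
  if (!slot)
    return NULL;                     /* realloc() failed, nothing was inserted */
  if (!exactmatch) {                 /* fresh slot, initialise it ourselves    */
    memcpy(&slot->id, id, sizeof(ot_hash));
    slot->payload = 0;
  }
  return slot;
}
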
89ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch ) { 86ot_peer *vector_find_or_insert_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch) {
90 ot_peer *match, *end; 87 ot_peer *match, *end;
91 const size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); 88 const size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
92 size_t match_to_end; 89 size_t match_to_end;
93 90
94 /* If space is zero but size is set, we're dealing with a list of vector->size buckets */ 91 /* If space is zero but size is set, we're dealing with a list of vector->size buckets */
95 if( vector->space < vector->size ) 92 if (vector->space < vector->size)
96 vector = ((ot_vector*)vector->data) + vector_hash_peer(peer, compare_size, vector->size ); 93 vector = ((ot_vector *)vector->data) + vector_hash_peer(peer, compare_size, vector->size);
97 match = binary_search( peer, vector->data, vector->size, peer_size, compare_size, exactmatch ); 94 match = binary_search(peer, vector->data, vector->size, peer_size, compare_size, exactmatch);
98 95
99 if( *exactmatch ) return match; 96 if (*exactmatch)
97 return match;
100 98
101 /* This is the number of bytes that need to be pushed back by peer_size bytes to make room for the new peer */ 99 /* This is the number of bytes that need to be pushed back by peer_size bytes to make room for the new peer */
102 end = (ot_peer*)vector->data + vector->size * peer_size; 100 end = (ot_peer *)vector->data + vector->size * peer_size;
103 match_to_end = end - match; 101 match_to_end = end - match;
104 102
105 if( vector->size + 1 > vector->space ) { 103 if (vector->size + 1 > vector->space) {
106 ptrdiff_t offset = match - (ot_peer*)vector->data; 104 ptrdiff_t offset = match - (ot_peer *)vector->data;
107 size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS; 105 size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS;
108 ot_peer *new_data = realloc( vector->data, new_space * peer_size ); 106 ot_peer *new_data = realloc(vector->data, new_space * peer_size);
109 107
110 if( !new_data ) return NULL; 108 if (!new_data)
109 return NULL;
111 /* Adjust pointer if it moved by realloc */ 110 /* Adjust pointer if it moved by realloc */
112 match = new_data + offset; 111 match = new_data + offset;
113 112
114 vector->data = new_data; 113 vector->data = new_data;
115 vector->space = new_space; 114 vector->space = new_space;
116 } 115 }
117 116
118 /* Here we're guaranteed to have enough space in vector to move the block of peers after insertion point */ 117 /* Here we're guaranteed to have enough space in vector to move the block of peers after insertion point */
119 memmove( match + peer_size, match, match_to_end); 118 memmove(match + peer_size, match, match_to_end);
120 119
121 vector->size++; 120 vector->size++;
122 return match; 121 return match;
@@ -127,130 +126,134 @@ ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer const *peer, siz
127 1 if a non-seeding peer was removed 126 1 if a non-seeding peer was removed
128 2 if a seeding peer was removed 127 2 if a seeding peer was removed
129*/ 128*/
130int vector_remove_peer( ot_vector *vector, ot_peer const *peer, size_t peer_size) { 129int vector_remove_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size) {
131 int exactmatch, was_seeder; 130 int exactmatch, was_seeder;
132 ot_peer *match, *end; 131 ot_peer *match, *end;
133 size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); 132 size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
134 133
135 if( !vector->size ) return 0; 134 if (!vector->size)
135 return 0;
136 136
137 /* If space is zero but size is set, we're dealing with a list of vector->size buckets */ 137 /* If space is zero but size is set, we're dealing with a list of vector->size buckets */
138 if( vector->space < vector->size ) 138 if (vector->space < vector->size)
139 vector = ((ot_vector*)vector->data) + vector_hash_peer(peer, compare_size, vector->size ); 139 vector = ((ot_vector *)vector->data) + vector_hash_peer(peer, compare_size, vector->size);
140 140
141 end = ((ot_peer*)vector->data) + peer_size * vector->size; 141 end = ((ot_peer *)vector->data) + peer_size * vector->size;
142 match = (ot_peer*)binary_search( peer, vector->data, vector->size, peer_size, compare_size, &exactmatch ); 142 match = (ot_peer *)binary_search(peer, vector->data, vector->size, peer_size, compare_size, &exactmatch);
143 if( !exactmatch ) return 0; 143 if (!exactmatch)
144 return 0;
144 145
145 was_seeder = ( OT_PEERFLAG_D( match, peer_size ) & PEER_FLAG_SEEDING ) ? 2 : 1; 146 was_seeder = (OT_PEERFLAG_D(match, peer_size) & PEER_FLAG_SEEDING) ? 2 : 1;
146 memmove( match, match + peer_size, end - match - peer_size ); 147 memmove(match, match + peer_size, end - match - peer_size);
147 148
148 vector->size--; 149 vector->size--;
149 vector_fixup_peers( vector, peer_size ); 150 vector_fixup_peers(vector, peer_size);
150 return was_seeder; 151 return was_seeder;
151} 152}
152 153
153void vector_remove_torrent( ot_vector *vector, ot_torrent *match ) { 154void vector_remove_torrent(ot_vector *vector, ot_torrent *match) {
154 ot_torrent *end = ((ot_torrent*)vector->data) + vector->size; 155 ot_torrent *end = ((ot_torrent *)vector->data) + vector->size;
155 156
156 if( !vector->size ) return; 157 if (!vector->size)
158 return;
157 159
158 /* If this is being called after an unsuccessful malloc() for peer_list 160 /* If this is being called after an unsuccessful malloc() for peer_list
159 in add_peer_to_torrent, match->peer_list actually might be NULL */ 161 in add_peer_to_torrent, match->peer_list actually might be NULL */
160 free_peerlist( match->peer_list6 ); 162 free_peerlist(match->peer_list6);
161 free_peerlist( match->peer_list4 ); 163 free_peerlist(match->peer_list4);
162 164
163 memmove( match, match + 1, sizeof(ot_torrent) * ( end - match - 1 ) ); 165 memmove(match, match + 1, sizeof(ot_torrent) * (end - match - 1));
164 if( ( --vector->size * OT_VECTOR_SHRINK_THRESH < vector->space ) && ( vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS ) ) { 166 if ((--vector->size * OT_VECTOR_SHRINK_THRESH < vector->space) && (vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS)) {
165 vector->space /= OT_VECTOR_SHRINK_RATIO; 167 vector->space /= OT_VECTOR_SHRINK_RATIO;
166 vector->data = realloc( vector->data, vector->space * sizeof( ot_torrent ) ); 168 vector->data = realloc(vector->data, vector->space * sizeof(ot_torrent));
167 } 169 }
168} 170}
169 171
170void vector_clean_list( ot_vector * vector, int num_buckets ) { 172void vector_clean_list(ot_vector *vector, int num_buckets) {
171 while( num_buckets-- ) 173 while (num_buckets--)
172 free( vector[num_buckets].data ); 174 free(vector[num_buckets].data);
173 free( vector ); 175 free(vector);
174 return; 176 return;
175} 177}
176 178
177void vector_redistribute_buckets( ot_peerlist * peer_list, size_t peer_size ) { 179void vector_redistribute_buckets(ot_peerlist *peer_list, size_t peer_size) {
178 int tmp, bucket, bucket_size_new, num_buckets_new, num_buckets_old = 1; 180 int tmp, bucket, bucket_size_new, num_buckets_new, num_buckets_old = 1;
179 ot_vector * bucket_list_new, * bucket_list_old = &peer_list->peers; 181 ot_vector *bucket_list_new, *bucket_list_old = &peer_list->peers;
180 int (*sort_func)(const void *, const void *) = 182 int (*sort_func)(const void *, const void *) = peer_size == OT_PEER_SIZE6 ? &vector_compare_peer6 : &vector_compare_peer4;
181 peer_size == OT_PEER_SIZE6 ? &vector_compare_peer6 : &vector_compare_peer4;
182 183
183 if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { 184 if (OT_PEERLIST_HASBUCKETS(peer_list)) {
184 num_buckets_old = peer_list->peers.size; 185 num_buckets_old = peer_list->peers.size;
185 bucket_list_old = peer_list->peers.data; 186 bucket_list_old = peer_list->peers.data;
186 } 187 }
187 188
188 if( peer_list->peer_count < 255 ) 189 if (peer_list->peer_count < 255)
189 num_buckets_new = 1; 190 num_buckets_new = 1;
190 else if( peer_list->peer_count > 8192 ) 191 else if (peer_list->peer_count > 8192)
191 num_buckets_new = 64; 192 num_buckets_new = 64;
192 else if( peer_list->peer_count >= 512 && peer_list->peer_count < 4096 ) 193 else if (peer_list->peer_count >= 512 && peer_list->peer_count < 4096)
193 num_buckets_new = 16; 194 num_buckets_new = 16;
194 else if( peer_list->peer_count < 512 && num_buckets_old <= 16 ) 195 else if (peer_list->peer_count < 512 && num_buckets_old <= 16)
195 num_buckets_new = num_buckets_old; 196 num_buckets_new = num_buckets_old;
196 else if( peer_list->peer_count < 512 ) 197 else if (peer_list->peer_count < 512)
197 num_buckets_new = 1; 198 num_buckets_new = 1;
198 else if( peer_list->peer_count < 8192 && num_buckets_old > 1 ) 199 else if (peer_list->peer_count < 8192 && num_buckets_old > 1)
199 num_buckets_new = num_buckets_old; 200 num_buckets_new = num_buckets_old;
200 else 201 else
201 num_buckets_new = 16; 202 num_buckets_new = 16;
202 203
203 if( num_buckets_new == num_buckets_old ) 204 if (num_buckets_new == num_buckets_old)
204 return; 205 return;
205 206
206 /* Assume near perfect distribution */ 207 /* Assume near perfect distribution */
207 bucket_list_new = malloc( num_buckets_new * sizeof( ot_vector ) ); 208 bucket_list_new = malloc(num_buckets_new * sizeof(ot_vector));
208 if( !bucket_list_new) return; 209 if (!bucket_list_new)
209 bzero( bucket_list_new, num_buckets_new * sizeof( ot_vector ) ); 210 return;
211 bzero(bucket_list_new, num_buckets_new * sizeof(ot_vector));
210 212
211 tmp = peer_list->peer_count / num_buckets_new; 213 tmp = peer_list->peer_count / num_buckets_new;
212 bucket_size_new = OT_VECTOR_MIN_MEMBERS; 214 bucket_size_new = OT_VECTOR_MIN_MEMBERS;
213 while( bucket_size_new < tmp) 215 while (bucket_size_new < tmp)
214 bucket_size_new *= OT_VECTOR_GROW_RATIO; 216 bucket_size_new *= OT_VECTOR_GROW_RATIO;
215 217
216 /* preallocate vectors to hold all peers */ 218 /* preallocate vectors to hold all peers */
217 for( bucket=0; bucket<num_buckets_new; ++bucket ) { 219 for (bucket = 0; bucket < num_buckets_new; ++bucket) {
218 bucket_list_new[bucket].space = bucket_size_new; 220 bucket_list_new[bucket].space = bucket_size_new;
219 bucket_list_new[bucket].data = malloc( bucket_size_new * peer_size ); 221 bucket_list_new[bucket].data = malloc(bucket_size_new * peer_size);
220 if( !bucket_list_new[bucket].data ) 222 if (!bucket_list_new[bucket].data)
221 return vector_clean_list( bucket_list_new, num_buckets_new ); 223 return vector_clean_list(bucket_list_new, num_buckets_new);
222 } 224 }
223 225
224 /* Now sort them into the correct bucket */ 226 /* Now sort them into the correct bucket */
225 for( bucket=0; bucket<num_buckets_old; ++bucket ) { 227 for (bucket = 0; bucket < num_buckets_old; ++bucket) {
226 ot_peer * peers_old = bucket_list_old[bucket].data; 228 ot_peer *peers_old = bucket_list_old[bucket].data;
227 int peer_count_old = bucket_list_old[bucket].size; 229 int peer_count_old = bucket_list_old[bucket].size;
228 while( peer_count_old-- ) { 230 while (peer_count_old--) {
229 ot_vector * bucket_dest = bucket_list_new; 231 ot_vector *bucket_dest = bucket_list_new;
230 if( num_buckets_new > 1 ) 232 if (num_buckets_new > 1)
231 bucket_dest += vector_hash_peer(peers_old, OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size), num_buckets_new); 233 bucket_dest += vector_hash_peer(peers_old, OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size), num_buckets_new);
232 if( bucket_dest->size + 1 > bucket_dest->space ) { 234 if (bucket_dest->size + 1 > bucket_dest->space) {
233 void * tmp = realloc( bucket_dest->data, peer_size * OT_VECTOR_GROW_RATIO * bucket_dest->space ); 235 void *tmp = realloc(bucket_dest->data, peer_size * OT_VECTOR_GROW_RATIO * bucket_dest->space);
234 if( !tmp ) return vector_clean_list( bucket_list_new, num_buckets_new ); 236 if (!tmp)
237 return vector_clean_list(bucket_list_new, num_buckets_new);
235 bucket_dest->data = tmp; 238 bucket_dest->data = tmp;
236 bucket_dest->space *= OT_VECTOR_GROW_RATIO; 239 bucket_dest->space *= OT_VECTOR_GROW_RATIO;
237 } 240 }
238 memcpy((ot_peer*)bucket_dest->data + peer_size * bucket_dest->size++, peers_old, peer_size); 241 memcpy((ot_peer *)bucket_dest->data + peer_size * bucket_dest->size++, peers_old, peer_size);
239 peers_old += peer_size; 242 peers_old += peer_size;
240 } 243 }
241 } 244 }
242 245
243 /* Now sort each bucket to later allow bsearch */ 246 /* Now sort each bucket to later allow bsearch */
244 for( bucket=0; bucket<num_buckets_new; ++bucket ) 247 for (bucket = 0; bucket < num_buckets_new; ++bucket)
245 qsort( bucket_list_new[bucket].data, bucket_list_new[bucket].size, peer_size, sort_func ); 248 qsort(bucket_list_new[bucket].data, bucket_list_new[bucket].size, peer_size, sort_func);
246 249
247 /* Everything worked fine. Now link new bucket_list to peer_list */ 250 /* Everything worked fine. Now link new bucket_list to peer_list */
248 if( OT_PEERLIST_HASBUCKETS( peer_list) ) 251 if (OT_PEERLIST_HASBUCKETS(peer_list))
249 vector_clean_list( (ot_vector*)peer_list->peers.data, peer_list->peers.size ); 252 vector_clean_list((ot_vector *)peer_list->peers.data, peer_list->peers.size);
250 else 253 else
251 free( peer_list->peers.data ); 254 free(peer_list->peers.data);
252 255
253 if( num_buckets_new > 1 ) { 256 if (num_buckets_new > 1) {
254 peer_list->peers.data = bucket_list_new; 257 peer_list->peers.data = bucket_list_new;
255 peer_list->peers.size = num_buckets_new; 258 peer_list->peers.size = num_buckets_new;
256 peer_list->peers.space = 0; /* Magic marker for "is list of buckets" */ 259 peer_list->peers.space = 0; /* Magic marker for "is list of buckets" */
@@ -258,27 +261,26 @@ void vector_redistribute_buckets( ot_peerlist * peer_list, size_t peer_size ) {
258 peer_list->peers.data = bucket_list_new->data; 261 peer_list->peers.data = bucket_list_new->data;
259 peer_list->peers.size = bucket_list_new->size; 262 peer_list->peers.size = bucket_list_new->size;
260 peer_list->peers.space = bucket_list_new->space; 263 peer_list->peers.space = bucket_list_new->space;
261 free( bucket_list_new ); 264 free(bucket_list_new);
262 } 265 }
263} 266}
264 267
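The else-if chain above picks the number of hash buckets for a swarm, with hysteresis so that a shrinking swarm is not immediately rebucketed. The same decision rewritten as a pure function sketch; only the thresholds are taken from the code, the name choose_bucket_count is not part of opentracker:

#include <stddef.h>

int choose_bucket_count(size_t peer_count, int num_buckets_old) {
  if (peer_count < 255)
    return 1; /* small swarm: keep a single flat vector */
  if (peer_count > 8192)
    return 64; /* large swarm: maximum fan-out */
  if (peer_count >= 512 && peer_count < 4096)
    return 16;
  if (peer_count < 512 && num_buckets_old <= 16)
    return num_buckets_old; /* hysteresis: do not rebucket on the way down */
  if (peer_count < 512)
    return 1;
  if (peer_count < 8192 && num_buckets_old > 1)
    return num_buckets_old; /* hysteresis */
  return 16;
}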
265void vector_fixup_peers( ot_vector * vector, size_t peer_size ) { 268void vector_fixup_peers(ot_vector *vector, size_t peer_size) {
266 int need_fix = 0; 269 int need_fix = 0;
267 270
268 if( !vector->size ) { 271 if (!vector->size) {
269 free( vector->data ); 272 free(vector->data);
270 vector->data = NULL; 273 vector->data = NULL;
271 vector->space = 0; 274 vector->space = 0;
272 return; 275 return;
273 } 276 }
274 277
275 while( ( vector->size * OT_VECTOR_SHRINK_THRESH < vector->space ) && 278 while ((vector->size * OT_VECTOR_SHRINK_THRESH < vector->space) && (vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS)) {
276 ( vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS ) ) {
277 vector->space /= OT_VECTOR_SHRINK_RATIO; 279 vector->space /= OT_VECTOR_SHRINK_RATIO;
278 need_fix++; 280 need_fix++;
279 } 281 }
280 if( need_fix ) 282 if (need_fix)
281 vector->data = realloc( vector->data, vector->space * peer_size ); 283 vector->data = realloc(vector->data, vector->space * peer_size);
282} 284}
283 285
284const char *g_version_vector_c = "$Source$: $Revision$\n"; 286const char *g_version_vector_c = "$Source$: $Revision$\n";
diff --git a/ot_vector.h b/ot_vector.h
index 37e2592..8d41452 100644
--- a/ot_vector.h
+++ b/ot_vector.h
@@ -16,22 +16,21 @@
16#define OT_PEER_BUCKET_MAXCOUNT 256 16#define OT_PEER_BUCKET_MAXCOUNT 256
17 17
18typedef struct { 18typedef struct {
19 void *data; 19 void *data;
20 size_t size; 20 size_t size;
21 size_t space; 21 size_t space;
22} ot_vector; 22} ot_vector;
23 23
24void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size, 24void *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch);
25 size_t compare_size, int *exactmatch ); 25void *vector_find_or_insert(ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch);
26void *vector_find_or_insert( ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch ); 26ot_peer *vector_find_or_insert_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch);
27ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch );
28 27
29int vector_remove_peer( ot_vector *vector, ot_peer const *peer, size_t peer_size); 28int vector_remove_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size);
30void vector_remove_torrent( ot_vector *vector, ot_torrent *match ); 29void vector_remove_torrent(ot_vector *vector, ot_torrent *match);
31 30
32/* For ot_clean.c */ 31/* For ot_clean.c */
33void vector_redistribute_buckets( ot_peerlist * peer_list, size_t peer_size ); 32void vector_redistribute_buckets(ot_peerlist *peer_list, size_t peer_size);
34void vector_fixup_peers( ot_vector * vector, size_t peer_size ); 33void vector_fixup_peers(ot_vector *vector, size_t peer_size);
35void vector_clean_list( ot_vector * vector, int num_buckets); 34void vector_clean_list(ot_vector *vector, int num_buckets);
36 35
37#endif 36#endif
diff --git a/scan_urlencoded_query.c b/scan_urlencoded_query.c
index a4f89c2..2d3599d 100644
--- a/scan_urlencoded_query.c
+++ b/scan_urlencoded_query.c
@@ -45,37 +45,45 @@ static const unsigned char is_unreserved[256] = {
45 45
46/* Do a fast nibble to hex representation conversion */ 46/* Do a fast nibble to hex representation conversion */
47static unsigned char fromhex(unsigned char x) { 47static unsigned char fromhex(unsigned char x) {
48 x-='0'; if( x<=9) return x; 48 x -= '0';
49 x&=~0x20; x-='A'-'0'; 49 if (x <= 9)
50 if( x<6 ) return x+10; 50 return x;
51 x &= ~0x20;
52 x -= 'A' - '0';
53 if (x < 6)
54 return x + 10;
51 return 0xff; 55 return 0xff;
52} 56}
53 57
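fromhex() maps '0'..'9' to 0..9 and 'A'..'F' to 10..15, folding lowercase onto uppercase via the & ~0x20 mask, and returns 0xff for any other byte. A small runnable sketch of how two such nibbles decode a %XX escape; decode_escape is a hypothetical helper, not part of the scanner:

#include <stdio.h>

/* same nibble decoder as above, repeated so the sketch is self-contained */
static unsigned char fromhex(unsigned char x) {
  x -= '0';
  if (x <= 9)
    return x;
  x &= ~0x20; /* fold 'a'..'f' onto 'A'..'F' */
  x -= 'A' - '0';
  if (x < 6)
    return x + 10;
  return 0xff;
}

/* decode the two hex digits of a %XX escape; returns -1 on malformed input */
static int decode_escape(const char *s) {
  unsigned char hi = fromhex((unsigned char)s[0]);
  unsigned char lo = fromhex((unsigned char)s[1]);
  if (hi == 0xff || lo == 0xff)
    return -1;
  return (hi << 4) | lo;
}

int main(void) {
  printf("%%2F decodes to 0x%02x ('/')\n", (unsigned)decode_escape("2F"));
  return 0;
}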
54/* Skip the value of a param=value pair */ 58/* Skip the value of a param=value pair */
55void scan_urlencoded_skipvalue( char **string ) { 59void scan_urlencoded_skipvalue(char **string) {
56 const unsigned char* s=*(const unsigned char**) string; 60 const unsigned char *s = *(const unsigned char **)string;
57 unsigned char f; 61 unsigned char f;
58 62
59 /* Since we are asked to skip the 'value', we stop at 63 /* Since we are asked to skip the 'value', we stop at
60 terminators for a 'value' string position */ 64 terminators for a 'value' string position */
61 while( ( f = is_unreserved[ *s++ ] ) & SCAN_SEARCHPATH_VALUE ); 65 while ((f = is_unreserved[*s++]) & SCAN_SEARCHPATH_VALUE)
66 ;
62 67
63 /* If we stopped at a hard terminator like \0 or \n, make the 68 /* If we stopped at a hard terminator like \0 or \n, make the
64 next scan_urlencoded_query encounter it again */ 69 next scan_urlencoded_query encounter it again */
65 if( f & SCAN_SEARCHPATH_TERMINATOR ) --s; 70 if (f & SCAN_SEARCHPATH_TERMINATOR)
71 --s;
66 72
67 *string = (char*)s; 73 *string = (char *)s;
68} 74}
69 75
70int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCHPATH_FLAG flags) { 76int scan_find_keywords(const ot_keywords *keywords, char **string, SCAN_SEARCHPATH_FLAG flags) {
71 char *deststring = *string; 77 char *deststring = *string;
72 ssize_t match_length = scan_urlencoded_query(string, deststring, flags ); 78 ssize_t match_length = scan_urlencoded_query(string, deststring, flags);
73 79
74 if( match_length < 0 ) return match_length; 80 if (match_length < 0)
75 if( match_length == 0 ) return -3; 81 return match_length;
82 if (match_length == 0)
83 return -3;
76 84
77 while( keywords->key ) { 85 while (keywords->key) {
78 if( !strncmp( keywords->key, deststring, match_length ) && !keywords->key[match_length] ) 86 if (!strncmp(keywords->key, deststring, match_length) && !keywords->key[match_length])
79 return keywords->value; 87 return keywords->value;
80 keywords++; 88 keywords++;
81 } 89 }
@@ -84,59 +92,74 @@ int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCH
84} 92}
85 93
86ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_FLAG flags) { 94ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_FLAG flags) {
87 const unsigned char* s=*(const unsigned char**) string; 95 const unsigned char *s = *(const unsigned char **)string;
88 unsigned char *d = (unsigned char*)deststring; 96 unsigned char *d = (unsigned char *)deststring;
89 unsigned char b, c; 97 unsigned char b, c;
90 98
91 /* This is the main decoding loop. 99 /* This is the main decoding loop.
92 'flag' determines which characters are non-terminating in the current context 100 'flag' determines which characters are non-terminating in the current context
93 (i.e. stop at '=' and '&' if scanning for a 'param'; stop at '?' if scanning for the path ) 101 (i.e. stop at '=' and '&' if scanning for a 'param'; stop at '?' if scanning for the path )
94 */ 102 */
95 while( is_unreserved[ c = *s++ ] & flags ) { 103 while (is_unreserved[c = *s++] & flags) {
96 104
97 /* When encountering an url escaped character, try to decode */ 105 /* When encountering an url escaped character, try to decode */
98 if( c=='%') { 106 if (c == '%') {
99 if( ( b = fromhex(*s++) ) == 0xff ) return -1; 107 if ((b = fromhex(*s++)) == 0xff)
100 if( ( c = fromhex(*s++) ) == 0xff ) return -1; 108 return -1;
101 c|=(b<<4); 109 if ((c = fromhex(*s++)) == 0xff)
110 return -1;
111 c |= (b << 4);
102 } 112 }
103 113
104 /* Write (possibly decoded) character to output */ 114 /* Write (possibly decoded) character to output */
105 *d++ = c; 115 *d++ = c;
106 } 116 }
107 117
108 switch( c ) { 118 switch (c) {
109 case 0: case '\r': case '\n': case ' ': 119 case 0:
120 case '\r':
121 case '\n':
122 case ' ':
110 /* If we started scanning on a hard terminator, indicate we've finished */ 123 /* If we started scanning on a hard terminator, indicate we've finished */
111 if( d == (unsigned char*)deststring ) return -2; 124 if (d == (unsigned char *)deststring)
125 return -2;
112 126
113 /* Else make the next call to scan_urlencoded_param encounter it again */ 127 /* Else make the next call to scan_urlencoded_param encounter it again */
114 --s; 128 --s;
115 break; 129 break;
116 case '?': 130 case '?':
117 if( flags != SCAN_PATH ) return -1; 131 if (flags != SCAN_PATH)
132 return -1;
118 break; 133 break;
119 case '=': 134 case '=':
120 if( flags != SCAN_SEARCHPATH_PARAM ) return -1; 135 if (flags != SCAN_SEARCHPATH_PARAM)
136 return -1;
121 break; 137 break;
122 case '&': 138 case '&':
123 if( flags == SCAN_PATH ) return -1; 139 if (flags == SCAN_PATH)
124 if( flags == SCAN_SEARCHPATH_PARAM ) --s; 140 return -1;
141 if (flags == SCAN_SEARCHPATH_PARAM)
142 --s;
125 break; 143 break;
126 default: 144 default:
127 return -1; 145 return -1;
128 } 146 }
129 147
130 *string = (char *)s; 148 *string = (char *)s;
131 return d - (unsigned char*)deststring; 149 return d - (unsigned char *)deststring;
132} 150}
133 151
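scan_urlencoded_query() decodes one token in place, stops at the terminators selected by flags, advances *string past what it consumed, and returns the decoded length (or a negative error code). A simplified, self-contained model of that contract, driven by an explicit stop-character string instead of the is_unreserved[] table; decode_token and the sample query are illustrative only:

#include <stdio.h>
#include <string.h>

/* copy the input into dest until a stop character, decoding %XX escapes,
   and leave *in pointing at the character that stopped the scan */
static long decode_token(const char **in, char *dest, const char *stop_chars) {
  const char *s = *in;
  char *d = dest;
  while (*s && !strchr(stop_chars, *s)) {
    unsigned char c = (unsigned char)*s++;
    if (c == '%') {
      unsigned int byte;
      if (!s[0] || !s[1] || sscanf(s, "%2x", &byte) != 1)
        return -1; /* malformed escape, akin to the scanner's -1 */
      c = (unsigned char)byte;
      s += 2;
    }
    *d++ = (char)c;
  }
  *in = s;
  return (long)(d - dest);
}

int main(void) {
  const char *query = "ip=%31%30.0.0.1&left=0";
  char buf[64];
  long len = decode_token(&query, buf, "=&"); /* reads the key "ip" */
  printf("key:   %.*s\n", (int)len, buf);
  ++query;                               /* skip '=' */
  len = decode_token(&query, buf, "=&"); /* value, %31%30 decodes to "10" */
  printf("value: %.*s\n", (int)len, buf);
  return 0;
}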
134ssize_t scan_fixed_int( char *data, size_t len, int *tmp ) { 152ssize_t scan_fixed_int(char *data, size_t len, int *tmp) {
135 int minus = 0; 153 int minus = 0;
136 *tmp = 0; 154 *tmp = 0;
137 if( *data == '-' ) --len, ++data, ++minus; 155 if (*data == '-')
138 while( (len > 0) && (*data >= '0') && (*data <= '9') ) { --len; *tmp = 10**tmp + *data++-'0'; } 156 --len, ++data, ++minus;
139 if( minus ) *tmp = -*tmp; 157 while ((len > 0) && (*data >= '0') && (*data <= '9')) {
158 --len;
159 *tmp = 10 * *tmp + *data++ - '0';
160 }
161 if (minus)
162 *tmp = -*tmp;
140 return len; 163 return len;
141} 164}
142 165
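scan_fixed_int() returns the number of bytes it could not parse, so a return value of 0 means the whole field was consumed. A runnable copy of the routine above (renamed, parameter made const, ssize_t replaced by long so the sketch needs no unistd.h), together with the two typical outcomes:

#include <stddef.h>
#include <stdio.h>

static long scan_fixed_int_demo(const char *data, size_t len, int *tmp) {
  int minus = 0;
  *tmp = 0;
  if (*data == '-')
    --len, ++data, ++minus;
  while ((len > 0) && (*data >= '0') && (*data <= '9')) {
    --len;
    *tmp = 10 * *tmp + *data++ - '0';
  }
  if (minus)
    *tmp = -*tmp;
  return (long)len;
}

int main(void) {
  int value;
  long unparsed = scan_fixed_int_demo("12345", 5, &value); /* 0 left over, value 12345 */
  printf("value=%d unparsed=%ld\n", value, unparsed);
  unparsed = scan_fixed_int_demo("12x45", 5, &value);      /* stops at 'x', 3 left over */
  printf("value=%d unparsed=%ld\n", value, unparsed);
  return 0;
}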
diff --git a/scan_urlencoded_query.h b/scan_urlencoded_query.h
index 06b91f5..74246e7 100644
--- a/scan_urlencoded_query.h
+++ b/scan_urlencoded_query.h
@@ -38,18 +38,18 @@ ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_F
38 or -2 for terminator found 38 or -2 for terminator found
39 or -3 for no keyword matched 39 or -3 for no keyword matched
40 */ 40 */
41int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCHPATH_FLAG flags); 41int scan_find_keywords(const ot_keywords *keywords, char **string, SCAN_SEARCHPATH_FLAG flags);
42 42
43/* string in: pointer to value of a param=value pair to skip 43/* string in: pointer to value of a param=value pair to skip
44 out: pointer to next scan position on return 44 out: pointer to next scan position on return
45*/ 45*/
46void scan_urlencoded_skipvalue( char **string ); 46void scan_urlencoded_skipvalue(char **string);
47 47
48/* data pointer to len chars of string 48/* data pointer to len chars of string
49 len length of chars in data to parse 49 len length of chars in data to parse
50 number number to receive result 50 number number to receive result
51 returns number of bytes not parsed, mostly !=0 means fail 51 returns number of bytes not parsed, mostly !=0 means fail
52 */ 52 */
53ssize_t scan_fixed_int( char *data, size_t len, int *number ); 53ssize_t scan_fixed_int(char *data, size_t len, int *number);
54 54
55#endif 55#endif
diff --git a/trackerlogic.c b/trackerlogic.c
index f01348d..6cc239e 100644
--- a/trackerlogic.c
+++ b/trackerlogic.c
@@ -4,119 +4,117 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <stdlib.h>
8#include <string.h>
9#include <stdio.h>
10#include <arpa/inet.h> 7#include <arpa/inet.h>
11#include <unistd.h>
12#include <errno.h> 8#include <errno.h>
13#include <stdint.h> 9#include <stdint.h>
10#include <stdio.h>
11#include <stdlib.h>
12#include <string.h>
13#include <unistd.h>
14 14
15/* Libowfat */ 15/* Libowfat */
16#include "array.h"
16#include "byte.h" 17#include "byte.h"
17#include "io.h" 18#include "io.h"
18#include "iob.h" 19#include "iob.h"
19#include "ip6.h" 20#include "ip6.h"
20#include "array.h"
21 21
22/* Opentracker */ 22/* Opentracker */
23#include "trackerlogic.h"
24#include "ot_vector.h"
25#include "ot_mutex.h"
26#include "ot_stats.h"
27#include "ot_clean.h"
28#include "ot_http.h"
29#include "ot_accesslist.h" 23#include "ot_accesslist.h"
24#include "ot_clean.h"
30#include "ot_fullscrape.h" 25#include "ot_fullscrape.h"
26#include "ot_http.h"
31#include "ot_livesync.h" 27#include "ot_livesync.h"
28#include "ot_mutex.h"
29#include "ot_stats.h"
30#include "ot_vector.h"
31#include "trackerlogic.h"
32 32
33/* Forward declaration */ 33/* Forward declaration */
34size_t return_peers_for_torrent( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ); 34size_t return_peers_for_torrent(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto);
35 35
36void free_peerlist( ot_peerlist *peer_list ) { 36void free_peerlist(ot_peerlist *peer_list) {
37 if( peer_list->peers.data ) { 37 if (peer_list->peers.data) {
38 if( OT_PEERLIST_HASBUCKETS( peer_list ) ) 38 if (OT_PEERLIST_HASBUCKETS(peer_list))
39 vector_clean_list( (ot_vector*)peer_list->peers.data, peer_list->peers.size ); 39 vector_clean_list((ot_vector *)peer_list->peers.data, peer_list->peers.size);
40 else 40 else
41 free( peer_list->peers.data ); 41 free(peer_list->peers.data);
42 } 42 }
43 free( peer_list ); 43 free(peer_list);
44} 44}
45 45
46void add_torrent_from_saved_state( ot_hash const hash, ot_time base, size_t down_count ) { 46void add_torrent_from_saved_state(ot_hash const hash, ot_time base, size_t down_count) {
47 int exactmatch; 47 int exactmatch;
48 ot_torrent *torrent; 48 ot_torrent *torrent;
49 ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); 49 ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash);
50 50
51 if( !accesslist_hashisvalid( hash ) ) 51 if (!accesslist_hashisvalid(hash))
52 return mutex_bucket_unlock_by_hash( hash, 0 ); 52 return mutex_bucket_unlock_by_hash(hash, 0);
53 53
54 torrent = vector_find_or_insert( torrents_list, (void*)hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); 54 torrent = vector_find_or_insert(torrents_list, (void *)hash, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
55 if( !torrent || exactmatch ) 55 if (!torrent || exactmatch)
56 return mutex_bucket_unlock_by_hash( hash, 0 ); 56 return mutex_bucket_unlock_by_hash(hash, 0);
57 57
58 /* Create a new torrent entry, then */ 58 /* Create a new torrent entry, then */
59 byte_zero( torrent, sizeof( ot_torrent ) ); 59 byte_zero(torrent, sizeof(ot_torrent));
60 memcpy( torrent->hash, hash, sizeof(ot_hash) ); 60 memcpy(torrent->hash, hash, sizeof(ot_hash));
61 61
62 if( !( torrent->peer_list6 = malloc( sizeof (ot_peerlist) ) ) || 62 if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) {
63 !( torrent->peer_list4 = malloc( sizeof (ot_peerlist) ) ) ) { 63 vector_remove_torrent(torrents_list, torrent);
64 vector_remove_torrent( torrents_list, torrent ); 64 return mutex_bucket_unlock_by_hash(hash, 0);
65 return mutex_bucket_unlock_by_hash( hash, 0 );
66 } 65 }
67 66
68 byte_zero( torrent->peer_list6, sizeof( ot_peerlist ) ); 67 byte_zero(torrent->peer_list6, sizeof(ot_peerlist));
69 byte_zero( torrent->peer_list4, sizeof( ot_peerlist ) ); 68 byte_zero(torrent->peer_list4, sizeof(ot_peerlist));
70 torrent->peer_list6->base = base; 69 torrent->peer_list6->base = base;
71 torrent->peer_list4->base = base; 70 torrent->peer_list4->base = base;
72 torrent->peer_list6->down_count = down_count; 71 torrent->peer_list6->down_count = down_count;
73 torrent->peer_list4->down_count = down_count; 72 torrent->peer_list4->down_count = down_count;
74 73
75 return mutex_bucket_unlock_by_hash( hash, 1 ); 74 return mutex_bucket_unlock_by_hash(hash, 1);
76} 75}
77 76
78size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount ) { 77size_t add_peer_to_torrent_and_return_peers(PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount) {
79 int exactmatch, delta_torrentcount = 0; 78 int exactmatch, delta_torrentcount = 0;
80 ot_torrent *torrent; 79 ot_torrent *torrent;
81 ot_peer *peer_dest; 80 ot_peer *peer_dest;
82 ot_vector *torrents_list = mutex_bucket_lock_by_hash( *ws->hash ); 81 ot_vector *torrents_list = mutex_bucket_lock_by_hash(*ws->hash);
83 ot_peerlist *peer_list; 82 ot_peerlist *peer_list;
84 size_t peer_size; /* initialized in next line */ 83 size_t peer_size; /* initialized in next line */
85 ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size); 84 ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size);
86 85
87 if( !accesslist_hashisvalid( *ws->hash ) ) { 86 if (!accesslist_hashisvalid(*ws->hash)) {
88 mutex_bucket_unlock_by_hash( *ws->hash, 0 ); 87 mutex_bucket_unlock_by_hash(*ws->hash, 0);
89 if( proto == FLAG_TCP ) { 88 if (proto == FLAG_TCP) {
90 const char invalid_hash[] = "d14:failure reason63:Requested download is not authorized for use with this tracker.e"; 89 const char invalid_hash[] = "d14:failure reason63:Requested download is not authorized for use with this tracker.e";
91 memcpy( ws->reply, invalid_hash, strlen( invalid_hash ) ); 90 memcpy(ws->reply, invalid_hash, strlen(invalid_hash));
92 return strlen( invalid_hash ); 91 return strlen(invalid_hash);
93 } 92 }
94 return 0; 93 return 0;
95 } 94 }
96 95
97 torrent = vector_find_or_insert( torrents_list, (void*)ws->hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); 96 torrent = vector_find_or_insert(torrents_list, (void *)ws->hash, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
98 if( !torrent ) { 97 if (!torrent) {
99 mutex_bucket_unlock_by_hash( *ws->hash, 0 ); 98 mutex_bucket_unlock_by_hash(*ws->hash, 0);
100 return 0; 99 return 0;
101 } 100 }
102 101
103 if( !exactmatch ) { 102 if (!exactmatch) {
104 /* Create a new torrent entry, then */ 103 /* Create a new torrent entry, then */
105 byte_zero( torrent, sizeof(ot_torrent)); 104 byte_zero(torrent, sizeof(ot_torrent));
106 memcpy( torrent->hash, *ws->hash, sizeof(ot_hash) ); 105 memcpy(torrent->hash, *ws->hash, sizeof(ot_hash));
107 106
108 if( !( torrent->peer_list6 = malloc( sizeof (ot_peerlist) ) ) || 107 if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) {
109 !( torrent->peer_list4 = malloc( sizeof (ot_peerlist) ) ) ) { 108 vector_remove_torrent(torrents_list, torrent);
110 vector_remove_torrent( torrents_list, torrent ); 109 mutex_bucket_unlock_by_hash(*ws->hash, 0);
111 mutex_bucket_unlock_by_hash( *ws->hash, 0 );
112 return 0; 110 return 0;
113 } 111 }
114 112
115 byte_zero( torrent->peer_list6, sizeof( ot_peerlist ) ); 113 byte_zero(torrent->peer_list6, sizeof(ot_peerlist));
116 byte_zero( torrent->peer_list4, sizeof( ot_peerlist ) ); 114 byte_zero(torrent->peer_list4, sizeof(ot_peerlist));
117 delta_torrentcount = 1; 115 delta_torrentcount = 1;
118 } else 116 } else
119 clean_single_torrent( torrent ); 117 clean_single_torrent(torrent);
120 118
121 torrent->peer_list6->base = g_now_minutes; 119 torrent->peer_list6->base = g_now_minutes;
122 torrent->peer_list4->base = g_now_minutes; 120 torrent->peer_list4->base = g_now_minutes;
@@ -124,99 +122,99 @@ size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstr
124 peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; 122 peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
125 123
126 /* Check for peer in torrent */ 124 /* Check for peer in torrent */
127 peer_dest = vector_find_or_insert_peer( &(peer_list->peers), peer_src, peer_size, &exactmatch ); 125 peer_dest = vector_find_or_insert_peer(&(peer_list->peers), peer_src, peer_size, &exactmatch);
128 if( !peer_dest ) { 126 if (!peer_dest) {
129 mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount ); 127 mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount);
130 return 0; 128 return 0;
131 } 129 }
132 130
133 /* Tell peer that it's fresh */ 131 /* Tell peer that it's fresh */
134 OT_PEERTIME( ws->peer, OT_PEER_SIZE6 ) = 0; 132 OT_PEERTIME(ws->peer, OT_PEER_SIZE6) = 0;
135 133
136 /* Sanitize flags: Whoever claims to have completed download, must be a seeder */ 134 /* Sanitize flags: Whoever claims to have completed download, must be a seeder */
137 if( ( OT_PEERFLAG( ws->peer ) & ( PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING ) ) == PEER_FLAG_COMPLETED ) 135 if ((OT_PEERFLAG(ws->peer) & (PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING)) == PEER_FLAG_COMPLETED)
138 OT_PEERFLAG( ws->peer ) ^= PEER_FLAG_COMPLETED; 136 OT_PEERFLAG(ws->peer) ^= PEER_FLAG_COMPLETED;
139 137
140 /* If we didn't have a match, create the peer there */ 138 /* If we didn't have a match, create the peer there */
141 if( !exactmatch ) { 139 if (!exactmatch) {
142 140
143#ifdef WANT_SYNC_LIVE 141#ifdef WANT_SYNC_LIVE
144 if( proto == FLAG_MCA ) 142 if (proto == FLAG_MCA)
145 OT_PEERFLAG( ws->peer ) |= PEER_FLAG_FROM_SYNC; 143 OT_PEERFLAG(ws->peer) |= PEER_FLAG_FROM_SYNC;
146 else 144 else
147 livesync_tell( ws ); 145 livesync_tell(ws);
148#endif 146#endif
149 147
150 peer_list->peer_count++; 148 peer_list->peer_count++;
151 if( OT_PEERFLAG( ws->peer ) & PEER_FLAG_COMPLETED ) { 149 if (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED) {
152 peer_list->down_count++; 150 peer_list->down_count++;
153 stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)ws ); 151 stats_issue_event(EVENT_COMPLETED, 0, (uintptr_t)ws);
154 } 152 }
155 if( OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING ) 153 if (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING)
156 peer_list->seed_count++; 154 peer_list->seed_count++;
157 155
158 } else { 156 } else {
159 stats_issue_event( EVENT_RENEW, 0, OT_PEERTIME( peer_dest, peer_size ) ); 157 stats_issue_event(EVENT_RENEW, 0, OT_PEERTIME(peer_dest, peer_size));
160#ifdef WANT_SPOT_WOODPECKER 158#ifdef WANT_SPOT_WOODPECKER
161 if( ( OT_PEERTIME(peer_dest, peer_size) > 0 ) && ( OT_PEERTIME(peer_dest, peer_size) < 20 ) ) 159 if ((OT_PEERTIME(peer_dest, peer_size) > 0) && (OT_PEERTIME(peer_dest, peer_size) < 20))
162 stats_issue_event( EVENT_WOODPECKER, 0, (uintptr_t)&ws->peer ); 160 stats_issue_event(EVENT_WOODPECKER, 0, (uintptr_t)&ws->peer);
163#endif 161#endif
164#ifdef WANT_SYNC_LIVE 162#ifdef WANT_SYNC_LIVE
165 /* Won't live sync peers that come back too fast. Only exception: 163 /* Won't live sync peers that come back too fast. Only exception:
166 fresh "completed" reports */ 164 fresh "completed" reports */
167 if( proto != FLAG_MCA ) { 165 if (proto != FLAG_MCA) {
168 if( OT_PEERTIME( peer_dest, peer_size ) > OT_CLIENT_SYNC_RENEW_BOUNDARY || 166 if (OT_PEERTIME(peer_dest, peer_size) > OT_CLIENT_SYNC_RENEW_BOUNDARY ||
169 ( !(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED ) ) ) 167 (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED)))
170 livesync_tell( ws ); 168 livesync_tell(ws);
171 } 169 }
172#endif 170#endif
173 171
174 if( (OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING ) && !(OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING ) ) 172 if ((OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING) && !(OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING))
175 peer_list->seed_count--; 173 peer_list->seed_count--;
176 if( !(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING ) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING ) ) 174 if (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING))
177 peer_list->seed_count++; 175 peer_list->seed_count++;
178 if( !(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED ) ) { 176 if (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED)) {
179 peer_list->down_count++; 177 peer_list->down_count++;
180 stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)ws ); 178 stats_issue_event(EVENT_COMPLETED, 0, (uintptr_t)ws);
181 } 179 }
182 if( OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED ) 180 if (OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED)
183 OT_PEERFLAG( ws->peer ) |= PEER_FLAG_COMPLETED; 181 OT_PEERFLAG(ws->peer) |= PEER_FLAG_COMPLETED;
184 } 182 }
185 183
186 memcpy( peer_dest, peer_src, peer_size ); 184 memcpy(peer_dest, peer_src, peer_size);
187#ifdef WANT_SYNC 185#ifdef WANT_SYNC
188 if( proto == FLAG_MCA ) { 186 if (proto == FLAG_MCA) {
189 mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount ); 187 mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount);
190 return 0; 188 return 0;
191 } 189 }
192#endif 190#endif
193 191
194 ws->reply_size = return_peers_for_torrent( ws, torrent, amount, ws->reply, proto ); 192 ws->reply_size = return_peers_for_torrent(ws, torrent, amount, ws->reply, proto);
195 mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount ); 193 mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount);
196 return ws->reply_size; 194 return ws->reply_size;
197} 195}
198 196
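The sanitizing step above enforces the rule spelled out in its comment: a peer that reports "completed" but does not flag itself as seeding has its completed bit cleared, so it is not counted as a finished download. A tiny sketch of that rule with made-up flag values (the real PEER_FLAG_* constants live in trackerlogic.h):

#include <stdio.h>

#define EXAMPLE_FLAG_SEEDING   0x01 /* illustrative values, not the real PEER_FLAG_* bits */
#define EXAMPLE_FLAG_COMPLETED 0x02

static unsigned char sanitize_flags(unsigned char flags) {
  if ((flags & (EXAMPLE_FLAG_COMPLETED | EXAMPLE_FLAG_SEEDING)) == EXAMPLE_FLAG_COMPLETED)
    flags ^= EXAMPLE_FLAG_COMPLETED; /* completed without seeding: drop the claim */
  return flags;
}

int main(void) {
  printf("%02x\n", (unsigned)sanitize_flags(EXAMPLE_FLAG_COMPLETED));                        /* prints 00 */
  printf("%02x\n", (unsigned)sanitize_flags(EXAMPLE_FLAG_COMPLETED | EXAMPLE_FLAG_SEEDING)); /* prints 03 */
  return 0;
}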
199static size_t return_peers_all( ot_peerlist *peer_list, size_t peer_size, char *reply ) { 197static size_t return_peers_all(ot_peerlist *peer_list, size_t peer_size, char *reply) {
200 unsigned int bucket, num_buckets = 1; 198 unsigned int bucket, num_buckets = 1;
201 ot_vector * bucket_list = &peer_list->peers; 199 ot_vector *bucket_list = &peer_list->peers;
202 size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); 200 size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
203 size_t result = compare_size * peer_list->peer_count; 201 size_t result = compare_size * peer_list->peer_count;
204 char * r_end = reply + result; 202 char *r_end = reply + result;
205 203
206 if( OT_PEERLIST_HASBUCKETS(peer_list) ) { 204 if (OT_PEERLIST_HASBUCKETS(peer_list)) {
207 num_buckets = bucket_list->size; 205 num_buckets = bucket_list->size;
208 bucket_list = (ot_vector *)bucket_list->data; 206 bucket_list = (ot_vector *)bucket_list->data;
209 } 207 }
210 208
211 for( bucket = 0; bucket<num_buckets; ++bucket ) { 209 for (bucket = 0; bucket < num_buckets; ++bucket) {
212 ot_peer *peers = bucket_list[bucket].data; 210 ot_peer *peers = bucket_list[bucket].data;
213 size_t peer_count = bucket_list[bucket].size; 211 size_t peer_count = bucket_list[bucket].size;
214 while( peer_count-- ) { 212 while (peer_count--) {
215 if( OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING ) { 213 if (OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING) {
216 r_end -= compare_size; 214 r_end -= compare_size;
217 memcpy( r_end, peers, compare_size); 215 memcpy(r_end, peers, compare_size);
218 } else { 216 } else {
219 memcpy( reply, peers, compare_size ); 217 memcpy(reply, peers, compare_size);
220 reply += compare_size; 218 reply += compare_size;
221 } 219 }
222 peers += peer_size; 220 peers += peer_size;
@@ -225,45 +223,47 @@ static size_t return_peers_all( ot_peerlist *peer_list, size_t peer_size, char *
225 return result; 223 return result;
226} 224}
227 225
228static size_t return_peers_selection( struct ot_workstruct *ws, ot_peerlist *peer_list, size_t peer_size, size_t amount, char *reply ) { 226static size_t return_peers_selection(struct ot_workstruct *ws, ot_peerlist *peer_list, size_t peer_size, size_t amount, char *reply) {
229 unsigned int bucket_offset, bucket_index = 0, num_buckets = 1; 227 unsigned int bucket_offset, bucket_index = 0, num_buckets = 1;
230 ot_vector * bucket_list = &peer_list->peers; 228 ot_vector *bucket_list = &peer_list->peers;
231 unsigned int shifted_pc = peer_list->peer_count; 229 unsigned int shifted_pc = peer_list->peer_count;
232 unsigned int shifted_step = 0; 230 unsigned int shifted_step = 0;
233 unsigned int shift = 0; 231 unsigned int shift = 0;
234 size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); 232 size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
235 size_t result = compare_size * amount; 233 size_t result = compare_size * amount;
236 char * r_end = reply + result; 234 char *r_end = reply + result;
237 235
238 if( OT_PEERLIST_HASBUCKETS(peer_list) ) { 236 if (OT_PEERLIST_HASBUCKETS(peer_list)) {
239 num_buckets = bucket_list->size; 237 num_buckets = bucket_list->size;
240 bucket_list = (ot_vector *)bucket_list->data; 238 bucket_list = (ot_vector *)bucket_list->data;
241 } 239 }
242 240
243 /* Make fixpoint arithmetic as exact as possible */ 241 /* Make fixpoint arithmetic as exact as possible */
244#define MAXPRECBIT (1<<(8*sizeof(int)-3)) 242#define MAXPRECBIT (1 << (8 * sizeof(int) - 3))
245 while( !(shifted_pc & MAXPRECBIT ) ) { shifted_pc <<= 1; shift++; } 243 while (!(shifted_pc & MAXPRECBIT)) {
246 shifted_step = shifted_pc/amount; 244 shifted_pc <<= 1;
245 shift++;
246 }
247 shifted_step = shifted_pc / amount;
247#undef MAXPRECBIT 248#undef MAXPRECBIT
248 249
249 /* Initialize somewhere in the middle of peers so that 250 /* Initialize somewhere in the middle of peers so that
250 fixpoint's aliasing doesn't always miss the same peers */ 251 fixpoint's aliasing doesn't always miss the same peers */
251 bucket_offset = nrand48(ws->rand48_state) % peer_list->peer_count; 252 bucket_offset = nrand48(ws->rand48_state) % peer_list->peer_count;
252 253
253 while( amount-- ) { 254 while (amount--) {
254 ot_peer *peer; 255 ot_peer *peer;
255 256
256 /* This is the aliased, non shifted range, next value may fall into */ 257 /* This is the aliased, non shifted range, next value may fall into */
257 unsigned int diff = ( ( ( amount + 1 ) * shifted_step ) >> shift ) - 258 unsigned int diff = (((amount + 1) * shifted_step) >> shift) - ((amount * shifted_step) >> shift);
258 ( ( amount * shifted_step ) >> shift ); 259 bucket_offset += 1 + nrand48(ws->rand48_state) % diff;
259 bucket_offset += 1 + nrand48(ws->rand48_state) % diff;
260 260
261 while( bucket_offset >= bucket_list[bucket_index].size ) { 261 while (bucket_offset >= bucket_list[bucket_index].size) {
262 bucket_offset -= bucket_list[bucket_index].size; 262 bucket_offset -= bucket_list[bucket_index].size;
263 bucket_index = ( bucket_index + 1 ) % num_buckets; 263 bucket_index = (bucket_index + 1) % num_buckets;
264 } 264 }
265 peer = bucket_list[bucket_index].data + peer_size * bucket_offset; 265 peer = bucket_list[bucket_index].data + peer_size * bucket_offset;
266 if( OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_SEEDING ) { 266 if (OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_SEEDING) {
267 r_end -= compare_size; 267 r_end -= compare_size;
268 memcpy(r_end, peer, compare_size); 268 memcpy(r_end, peer, compare_size);
269 } else { 269 } else {
@@ -274,51 +274,51 @@ static size_t return_peers_selection( struct ot_workstruct *ws, ot_peerlist *pee
274 return result; 274 return result;
275} 275}
276 276
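return_peers_selection() spreads `amount` picks across `peer_count` peers with fixed-point arithmetic: the peer count is shifted up until it almost fills an int, so the division by amount keeps as much precision as possible, and each pick then lands at a random offset inside its stride. A standalone sketch of that stepping, using rand() instead of the per-worker nrand48() state and a flat index instead of walking buckets; it assumes 1 <= amount <= peer_count:

#include <stdio.h>
#include <stdlib.h>

static void pick_indices(unsigned int peer_count, unsigned int amount) {
  const unsigned int MAXPRECBIT = 1u << (8 * sizeof(int) - 3);
  unsigned int shifted_pc = peer_count, shift = 0, shifted_step, offset;

  while (!(shifted_pc & MAXPRECBIT)) { /* scale up to preserve precision */
    shifted_pc <<= 1;
    shift++;
  }
  shifted_step = shifted_pc / amount;

  offset = rand() % peer_count; /* start somewhere in the middle of the peers */
  while (amount--) {
    /* width of the unscaled index range the next pick may fall into */
    unsigned int diff = (((amount + 1) * shifted_step) >> shift) - ((amount * shifted_step) >> shift);
    offset = (offset + 1 + rand() % diff) % peer_count; /* wrap instead of walking buckets */
    printf("picked index %u\n", offset);
  }
}

int main(void) {
  pick_indices(1000, 5);
  return 0;
}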
277static size_t return_peers_for_torrent_udp( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply ) { 277static size_t return_peers_for_torrent_udp(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply) {
278 char *r = reply; 278 char *r = reply;
279 size_t peer_size = peer_size_from_peer6(&ws->peer); 279 size_t peer_size = peer_size_from_peer6(&ws->peer);
280 ot_peerlist *peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; 280 ot_peerlist *peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
281 size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; 281 size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
282 size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; 282 size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
283 283
284 if( amount > peer_list->peer_count ) 284 if (amount > peer_list->peer_count)
285 amount = peer_list->peer_count; 285 amount = peer_list->peer_count;
286 286
287 *(uint32_t*)(r+0) = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM ); 287 *(uint32_t *)(r + 0) = htonl(OT_CLIENT_REQUEST_INTERVAL_RANDOM);
288 *(uint32_t*)(r+4) = htonl( peer_count - seed_count ); 288 *(uint32_t *)(r + 4) = htonl(peer_count - seed_count);
289 *(uint32_t*)(r+8) = htonl( seed_count ); 289 *(uint32_t *)(r + 8) = htonl(seed_count);
290 r += 12; 290 r += 12;
291 291
292 if( amount ) { 292 if (amount) {
293 if( amount == peer_list->peer_count ) 293 if (amount == peer_list->peer_count)
294 r += return_peers_all( peer_list, peer_size, r ); 294 r += return_peers_all(peer_list, peer_size, r);
295 else 295 else
296 r += return_peers_selection( ws, peer_list, peer_size, amount, r ); 296 r += return_peers_selection(ws, peer_list, peer_size, amount, r);
297 } 297 }
298 return r - reply; 298 return r - reply;
299} 299}
300 300
301static size_t return_peers_for_torrent_tcp( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply ) { 301static size_t return_peers_for_torrent_tcp(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply) {
302 char *r = reply; 302 char *r = reply;
303 int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; 303 int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM;
304 size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; 304 size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
305 size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count; 305 size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count;
306 size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - seed_count; 306 size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - seed_count;
307 307
308 /* Simple case: amount of peers in both lists is less than requested, here we return all results */ 308 /* Simple case: amount of peers in both lists is less than requested, here we return all results */
309 size_t amount_v4 = torrent->peer_list4->peer_count; 309 size_t amount_v4 = torrent->peer_list4->peer_count;
310 size_t amount_v6 = torrent->peer_list6->peer_count; 310 size_t amount_v6 = torrent->peer_list6->peer_count;
311 311
312 /* Complex case: both lists have more than enough entries and we need to split between v4 and v6 clients */ 312 /* Complex case: both lists have more than enough entries and we need to split between v4 and v6 clients */
313 if( amount_v4 + amount_v6 > amount ) { 313 if (amount_v4 + amount_v6 > amount) {
314 size_t amount_left, percent_v6 = 0, percent_v4 = 0, left_v6, left_v4; 314 size_t amount_left, percent_v6 = 0, percent_v4 = 0, left_v6, left_v4;
315 const size_t SCALE = 1024; 315 const size_t SCALE = 1024;
316 316
317 /* If possible, fill at least a quarter of peers from each family */ 318 /* If possible, fill at least a quarter of peers from each family */
318 if( amount / 4 <= amount_v4 ) 318 if (amount / 4 <= amount_v4)
319 amount_v4 = amount / 4; 319 amount_v4 = amount / 4;
320 if( amount / 4 <= amount_v6 ) 320 if (amount / 4 <= amount_v6)
321 amount_v6 = amount / 4; 321 amount_v6 = amount / 4;
322 322
323 /* Fill the rest according to which family's pool provides more peers */ 323 /* Fill the rest according to which family's pool provides more peers */
324 amount_left = amount - (amount_v4 + amount_v6); 324 amount_left = amount - (amount_v4 + amount_v6);
@@ -326,37 +326,38 @@ static size_t return_peers_for_torrent_tcp( struct ot_workstruct * ws, ot_torren
326 left_v4 = torrent->peer_list4->peer_count - amount_v4; 326 left_v4 = torrent->peer_list4->peer_count - amount_v4;
327 left_v6 = torrent->peer_list6->peer_count - amount_v6; 327 left_v6 = torrent->peer_list6->peer_count - amount_v6;
328 328
329 if( left_v4 + left_v6 ) { 329 if (left_v4 + left_v6) {
330 percent_v4 = (SCALE * left_v4) / (left_v4 + left_v6); 330 percent_v4 = (SCALE * left_v4) / (left_v4 + left_v6);
331 percent_v6 = (SCALE * left_v6) / (left_v4 + left_v6); 331 percent_v6 = (SCALE * left_v6) / (left_v4 + left_v6);
332 } 332 }
333 333
334 amount_v4 += (amount_left * percent_v4) / SCALE; 334 amount_v4 += (amount_left * percent_v4) / SCALE;
335 amount_v6 += (amount_left * percent_v6) / SCALE; 335 amount_v6 += (amount_left * percent_v6) / SCALE;
336 336
337 /* Integer division rounding can leave out a peer */ 337 /* Integer division rounding can leave out a peer */
338 if( amount_v4 + amount_v6 < amount && amount_v6 < torrent->peer_list6->peer_count ) 338 if (amount_v4 + amount_v6 < amount && amount_v6 < torrent->peer_list6->peer_count)
339 ++amount_v6; 339 ++amount_v6;
340 if( amount_v4 + amount_v6 < amount && amount_v4 < torrent->peer_list4->peer_count ) 340 if (amount_v4 + amount_v6 < amount && amount_v4 < torrent->peer_list4->peer_count)
341 ++amount_v4; 341 ++amount_v4;
342 } 342 }
343 343
344 r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie", seed_count, down_count, peer_count, erval, erval/2 ); 344 r +=
345 sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie", seed_count, down_count, peer_count, erval, erval / 2);
345 346
346 if( amount_v4 ) { 347 if (amount_v4) {
347 r += sprintf( r, PEERS_BENCODED4 "%zd:", OT_PEER_COMPARE_SIZE4 * amount_v4); 348 r += sprintf(r, PEERS_BENCODED4 "%zd:", OT_PEER_COMPARE_SIZE4 * amount_v4);
348 if( amount_v4 == torrent->peer_list4->peer_count ) 349 if (amount_v4 == torrent->peer_list4->peer_count)
349 r += return_peers_all( torrent->peer_list4, OT_PEER_SIZE4, r ); 350 r += return_peers_all(torrent->peer_list4, OT_PEER_SIZE4, r);
350 else 351 else
351 r += return_peers_selection( ws, torrent->peer_list4, OT_PEER_SIZE4, amount_v4, r ); 352 r += return_peers_selection(ws, torrent->peer_list4, OT_PEER_SIZE4, amount_v4, r);
352 } 353 }
353 354
354 if( amount_v6 ) { 355 if (amount_v6) {
355 r += sprintf( r, PEERS_BENCODED6 "%zd:", OT_PEER_COMPARE_SIZE6 * amount_v6); 356 r += sprintf(r, PEERS_BENCODED6 "%zd:", OT_PEER_COMPARE_SIZE6 * amount_v6);
356 if( amount_v6 == torrent->peer_list6->peer_count ) 357 if (amount_v6 == torrent->peer_list6->peer_count)
357 r += return_peers_all( torrent->peer_list6, OT_PEER_SIZE6, r ); 358 r += return_peers_all(torrent->peer_list6, OT_PEER_SIZE6, r);
358 else 359 else
359 r += return_peers_selection( ws, torrent->peer_list6, OT_PEER_SIZE6, amount_v6, r ); 360 r += return_peers_selection(ws, torrent->peer_list6, OT_PEER_SIZE6, amount_v6, r);
360 } 361 }
361 362
362 *r++ = 'e'; 363 *r++ = 'e';
@@ -365,154 +366,159 @@ static size_t return_peers_for_torrent_tcp( struct ot_workstruct * ws, ot_torren
365} 366}
366 367
367/* Compiles a list of random peers for a torrent 368/* Compiles a list of random peers for a torrent
368 * Reply must have enough space to hold: 369 * Reply must have enough space to hold:
369 * 92 + 6 * amount bytes for TCP/IPv4 370 * 92 + 6 * amount bytes for TCP/IPv4
370 * 92 + 18 * amount bytes for TCP/IPv6 371 * 92 + 18 * amount bytes for TCP/IPv6
371 * 12 + 6 * amount bytes for UDP/IPv4 372 * 12 + 6 * amount bytes for UDP/IPv4
372 * 12 + 18 * amount bytes for UDP/IPv6 373 * 12 + 18 * amount bytes for UDP/IPv6
373 * Does not yet take care not to return the requesting peer itself 374 * Does not yet take care not to return the requesting peer itself
374*/ 375 */
375size_t return_peers_for_torrent( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ) { 376size_t return_peers_for_torrent(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto) {
376 return proto == FLAG_TCP ? return_peers_for_torrent_tcp(ws, torrent, amount, reply) : return_peers_for_torrent_udp(ws, torrent, amount, reply); 377 return proto == FLAG_TCP ? return_peers_for_torrent_tcp(ws, torrent, amount, reply) : return_peers_for_torrent_udp(ws, torrent, amount, reply);
377} 378}
378 379
379/* Fetches scrape info for a specific torrent */ 380/* Fetches scrape info for a specific torrent */
380size_t return_udp_scrape_for_torrent( ot_hash const hash, char *reply ) { 381size_t return_udp_scrape_for_torrent(ot_hash const hash, char *reply) {
381 int exactmatch, delta_torrentcount = 0; 382 int exactmatch, delta_torrentcount = 0;
382 ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); 383 ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash);
383 ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); 384 ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
384 385
385 if( !exactmatch ) { 386 if (!exactmatch) {
386 memset( reply, 0, 12); 387 memset(reply, 0, 12);
387 } else { 388 } else {
388 uint32_t *r = (uint32_t*) reply; 389 uint32_t *r = (uint32_t *)reply;
389 390
390 if( clean_single_torrent( torrent ) ) { 391 if (clean_single_torrent(torrent)) {
391 vector_remove_torrent( torrents_list, torrent ); 392 vector_remove_torrent(torrents_list, torrent);
392 memset( reply, 0, 12); 393 memset(reply, 0, 12);
393 delta_torrentcount = -1; 394 delta_torrentcount = -1;
394 } else { 395 } else {
395 r[0] = htonl( torrent->peer_list6->seed_count + torrent->peer_list4->seed_count ); 396 r[0] = htonl(torrent->peer_list6->seed_count + torrent->peer_list4->seed_count);
396 r[1] = htonl( torrent->peer_list6->down_count + torrent->peer_list4->down_count ); 397 r[1] = htonl(torrent->peer_list6->down_count + torrent->peer_list4->down_count);
397 r[2] = htonl( torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - 398 r[2] = htonl(torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - torrent->peer_list6->seed_count - torrent->peer_list4->seed_count);
398 torrent->peer_list6->seed_count - torrent->peer_list4->seed_count);
399 } 399 }
400 } 400 }
401 mutex_bucket_unlock_by_hash( hash, delta_torrentcount ); 401 mutex_bucket_unlock_by_hash(hash, delta_torrentcount);
402 return 12; 402 return 12;
403} 403}
404 404
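The fixed 12-byte record filled in above is three network-order 32-bit counters: seeders, completed downloads and leechers, in that order, zeroed when the torrent is unknown or was just cleaned away. A sketch of the same layout; the function name is illustrative:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

void fill_scrape_record(char reply[12], uint32_t seeders, uint32_t completed, uint32_t leechers) {
  uint32_t r[3] = {htonl(seeders), htonl(completed), htonl(leechers)};
  memcpy(reply, r, sizeof(r)); /* always exactly 12 bytes */
}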
405/* Fetches scrape info for a specific torrent */ 405/* Fetches scrape info for a specific torrent */
406size_t return_tcp_scrape_for_torrent( ot_hash const *hash_list, int amount, char *reply ) { 406size_t return_tcp_scrape_for_torrent(ot_hash const *hash_list, int amount, char *reply) {
407 char *r = reply; 407 char *r = reply;
408 int exactmatch, i; 408 int exactmatch, i;
409 409
410 r += sprintf( r, "d5:filesd" ); 410 r += sprintf(r, "d5:filesd");
411 411
412 for( i=0; i<amount; ++i ) { 412 for (i = 0; i < amount; ++i) {
413 int delta_torrentcount = 0; 413 int delta_torrentcount = 0;
414 ot_hash const *hash = hash_list + i; 414 ot_hash const *hash = hash_list + i;
415 ot_vector *torrents_list = mutex_bucket_lock_by_hash( *hash ); 415 ot_vector *torrents_list = mutex_bucket_lock_by_hash(*hash);
416 ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); 416 ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
417 417
418 if( exactmatch ) { 418 if (exactmatch) {
419 if( clean_single_torrent( torrent ) ) { 419 if (clean_single_torrent(torrent)) {
420 vector_remove_torrent( torrents_list, torrent ); 420 vector_remove_torrent(torrents_list, torrent);
421 delta_torrentcount = -1; 421 delta_torrentcount = -1;
422 } else { 422 } else {
423 *r++='2';*r++='0';*r++=':'; 423 *r++ = '2';
424 memcpy( r, hash, sizeof(ot_hash) ); r+=sizeof(ot_hash); 424 *r++ = '0';
425 r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", 425 *r++ = ':';
426 torrent->peer_list6->seed_count + torrent->peer_list4->seed_count, 426 memcpy(r, hash, sizeof(ot_hash));
427 torrent->peer_list6->down_count + torrent->peer_list4->down_count, 427 r += sizeof(ot_hash);
428 torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - 428 r += sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", torrent->peer_list6->seed_count + torrent->peer_list4->seed_count,
429 torrent->peer_list6->seed_count - torrent->peer_list4->seed_count); 429 torrent->peer_list6->down_count + torrent->peer_list4->down_count,
430 torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - torrent->peer_list6->seed_count - torrent->peer_list4->seed_count);
430 } 431 }
431 } 432 }
432 mutex_bucket_unlock_by_hash( *hash, delta_torrentcount ); 433 mutex_bucket_unlock_by_hash(*hash, delta_torrentcount);
433 } 434 }
434 435
435 *r++ = 'e'; *r++ = 'e'; 436 *r++ = 'e';
437 *r++ = 'e';
436 return r - reply; 438 return r - reply;
437} 439}
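For the bencoded variant assembled above, a single known torrent with 5 seeders, 123 completed downloads and 7 leechers would produce the reply shown below; <hash> stands for the 20 raw info-hash bytes, and hashes that are not found (or get cleaned while scraping) contribute nothing between the dictionaries:

/* Illustrative output of return_tcp_scrape_for_torrent() for one torrent:
 *
 *   d5:filesd20:<hash>d8:completei5e10:downloadedi123e10:incompletei7eeee
 *
 * "d5:filesd" opens the outer and the "files" dictionary, the per-torrent
 * dictionary closes with one 'e', and the trailing "ee" closes both outer
 * dictionaries, matching the two *r++ = 'e' statements above. */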
438 440
439static ot_peerlist dummy_list; 441static ot_peerlist dummy_list;
440size_t remove_peer_from_torrent( PROTO_FLAG proto, struct ot_workstruct *ws ) { 442size_t remove_peer_from_torrent(PROTO_FLAG proto, struct ot_workstruct *ws) {
441 int exactmatch; 443 int exactmatch;
442 ot_vector *torrents_list = mutex_bucket_lock_by_hash( *ws->hash ); 444 ot_vector *torrents_list = mutex_bucket_lock_by_hash(*ws->hash);
443 ot_torrent *torrent = binary_search( ws->hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); 445 ot_torrent *torrent = binary_search(ws->hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
444 ot_peerlist *peer_list = &dummy_list; 446 ot_peerlist *peer_list = &dummy_list;
445 size_t peer_size; /* initialized in next line */ 447 size_t peer_size; /* initialized in next line */
446 ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size); 448 ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size);
447 size_t peer_count = 0, seed_count = 0; 449 size_t peer_count = 0, seed_count = 0;
448 450
449#ifdef WANT_SYNC_LIVE 451#ifdef WANT_SYNC_LIVE
450 if( proto != FLAG_MCA ) { 452 if (proto != FLAG_MCA) {
451 OT_PEERFLAG( ws->peer ) |= PEER_FLAG_STOPPED; 453 OT_PEERFLAG(ws->peer) |= PEER_FLAG_STOPPED;
452 livesync_tell( ws ); 454 livesync_tell(ws);
453 } 455 }
454#endif 456#endif
455 457
456 if( exactmatch ) { 458 if (exactmatch) {
457 peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; 459 peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
458 switch( vector_remove_peer( &peer_list->peers, peer_src, peer_size ) ) { 460 switch (vector_remove_peer(&peer_list->peers, peer_src, peer_size)) {
459 case 2: peer_list->seed_count--; /* Intentional fallthrough */ 461 case 2:
460 case 1: peer_list->peer_count--; /* Intentional fallthrough */ 462 peer_list->seed_count--; /* Intentional fallthrough */
461 default: break; 463 case 1:
464 peer_list->peer_count--; /* Intentional fallthrough */
465 default:
466 break;
462 } 467 }
463 468
464 peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; 469 peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
465 seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; 470 seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
466 } 471 }
467 472
468 473 if (proto == FLAG_TCP) {
469 if( proto == FLAG_TCP ) { 474 int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM;
470 int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; 475 ws->reply_size = sprintf(ws->reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie%s0:e", seed_count, peer_count - seed_count, erval,
471 ws->reply_size = sprintf( ws->reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie%s0:e", seed_count, peer_count - seed_count, erval, erval / 2, peer_size == OT_PEER_SIZE6 ? PEERS_BENCODED6 : PEERS_BENCODED4 ); 476 erval / 2, peer_size == OT_PEER_SIZE6 ? PEERS_BENCODED6 : PEERS_BENCODED4);
472 } 477 }
473 478
474 /* Handle UDP reply */ 479 /* Handle UDP reply */
475 if( proto == FLAG_UDP ) { 480 if (proto == FLAG_UDP) {
476 ((uint32_t*)ws->reply)[2] = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM ); 481 ((uint32_t *)ws->reply)[2] = htonl(OT_CLIENT_REQUEST_INTERVAL_RANDOM);
477 ((uint32_t*)ws->reply)[3] = htonl( peer_count - seed_count ); 482 ((uint32_t *)ws->reply)[3] = htonl(peer_count - seed_count);
478 ((uint32_t*)ws->reply)[4] = htonl( seed_count); 483 ((uint32_t *)ws->reply)[4] = htonl(seed_count);
479 ws->reply_size = 20; 484 ws->reply_size = 20;
480 } 485 }
481 486
482 mutex_bucket_unlock_by_hash( *ws->hash, 0 ); 487 mutex_bucket_unlock_by_hash(*ws->hash, 0);
483 return ws->reply_size; 488 return ws->reply_size;
484} 489}
485 490
486void iterate_all_torrents( int (*for_each)( ot_torrent* torrent, uintptr_t data ), uintptr_t data ) { 491void iterate_all_torrents(int (*for_each)(ot_torrent *torrent, uintptr_t data), uintptr_t data) {
487 int bucket; 492 int bucket;
488 size_t j; 493 size_t j;
489 494
490 for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 495 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
491 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 496 ot_vector *torrents_list = mutex_bucket_lock(bucket);
492 ot_torrent *torrents = (ot_torrent*)(torrents_list->data); 497 ot_torrent *torrents = (ot_torrent *)(torrents_list->data);
493 498
494 for( j=0; j<torrents_list->size; ++j ) 499 for (j = 0; j < torrents_list->size; ++j)
495 if( for_each( torrents + j, data ) ) 500 if (for_each(torrents + j, data))
496 break; 501 break;
497 502
498 mutex_bucket_unlock( bucket, 0 ); 503 mutex_bucket_unlock(bucket, 0);
499 if( !g_opentracker_running ) return; 504 if (!g_opentracker_running)
505 return;
500 } 506 }
501} 507}
502 508
503ot_peer *peer_from_peer6( ot_peer6 *peer, size_t *peer_size ) { 509ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size) {
504 ot_ip6 *ip = (ot_ip6*)peer; 510 ot_ip6 *ip = (ot_ip6 *)peer;
505 if( !ip6_isv4mapped(ip) ) { 511 if (!ip6_isv4mapped(ip)) {
506 *peer_size = OT_PEER_SIZE6; 512 *peer_size = OT_PEER_SIZE6;
507 return (ot_peer*)peer; 513 return (ot_peer *)peer;
508 } 514 }
509 *peer_size = OT_PEER_SIZE4; 515 *peer_size = OT_PEER_SIZE4;
510 return (ot_peer*)(((uint8_t*)peer) + 12); 516 return (ot_peer *)(((uint8_t *)peer) + 12);
511} 517}
512 518
513size_t peer_size_from_peer6(ot_peer6 *peer) { 519size_t peer_size_from_peer6(ot_peer6 *peer) {
514 ot_ip6 *ip = (ot_ip6*)peer; 520 ot_ip6 *ip = (ot_ip6 *)peer;
515 if( !ip6_isv4mapped(ip)) 521 if (!ip6_isv4mapped(ip))
516 return OT_PEER_SIZE6; 522 return OT_PEER_SIZE6;
517 return OT_PEER_SIZE4; 523 return OT_PEER_SIZE4;
518} 524}
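The 12-byte skip in peer_from_peer6() lines up with the size macros declared in trackerlogic.h below: an ot_peer6 spans 16 + 2 + 1 + 1 = 20 bytes (address, port, flag, time) and an ot_peer4 spans 4 + 2 + 1 + 1 = 8 bytes, and in a v4-mapped address (::ffff:a.b.c.d) the IPv4 part occupies the last 4 of the 16 address bytes, so dropping the first 12 bytes leaves exactly an 8-byte v4 peer. A compile-time restatement of that arithmetic (C11; the enum names are made up for the sketch):

enum { IP6_BYTES = 16, IP4_BYTES = 4, PORT_BYTES = 2, FLAG_BYTES = 1, TIME_BYTES = 1 };

/* Only restates the arithmetic behind OT_PEER_SIZE6/4 and the +12 offset. */
_Static_assert(IP6_BYTES + PORT_BYTES + FLAG_BYTES + TIME_BYTES == 20, "ot_peer6 spans 20 bytes");
_Static_assert(IP4_BYTES + PORT_BYTES + FLAG_BYTES + TIME_BYTES == 8, "ot_peer4 spans 8 bytes");
_Static_assert(20 - 12 == 8, "skipping the mapped prefix leaves a v4 peer");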
@@ -520,20 +526,20 @@ size_t peer_size_from_peer6(ot_peer6 *peer) {
520#ifdef _DEBUG_RANDOMTORRENTS 526#ifdef _DEBUG_RANDOMTORRENTS
521void trackerlogic_add_random_torrents(size_t amount) { 527void trackerlogic_add_random_torrents(size_t amount) {
522 struct ot_workstruct ws; 528 struct ot_workstruct ws;
523 memset( &ws, 0, sizeof(ws) ); 529 memset(&ws, 0, sizeof(ws));
524 530
525 ws.inbuf=malloc(G_INBUF_SIZE); 531 ws.inbuf = malloc(G_INBUF_SIZE);
526 ws.outbuf=malloc(G_OUTBUF_SIZE); 532 ws.outbuf = malloc(G_OUTBUF_SIZE);
527 ws.reply=ws.outbuf; 533 ws.reply = ws.outbuf;
528 ws.hash=(ot_hash*)ws.inbuf; 534 ws.hash = (ot_hash *)ws.inbuf;
529 535
530 while( amount-- ) { 536 while (amount--) {
531 arc4random_buf(ws.hash, sizeof(ot_hash)); 537 arc4random_buf(ws.hash, sizeof(ot_hash));
532 arc4random_buf(&ws.peer, sizeof(ws.peer)); 538 arc4random_buf(&ws.peer, sizeof(ws.peer));
533 539
534 OT_PEERFLAG(ws.peer) &= PEER_FLAG_SEEDING | PEER_FLAG_COMPLETED | PEER_FLAG_STOPPED; 540 OT_PEERFLAG(ws.peer) &= PEER_FLAG_SEEDING | PEER_FLAG_COMPLETED | PEER_FLAG_STOPPED;
535 541
536 add_peer_to_torrent_and_return_peers( FLAG_TCP, &ws, 1 ); 542 add_peer_to_torrent_and_return_peers(FLAG_TCP, &ws, 1);
537 } 543 }
538 544
539 free(ws.inbuf); 545 free(ws.inbuf);
@@ -541,54 +547,54 @@ void trackerlogic_add_random_torrents(size_t amount) {
541} 547}
542#endif 548#endif
543 549
544void exerr( char * message ) { 550void exerr(char *message) {
545 fprintf( stderr, "%s\n", message ); 551 fprintf(stderr, "%s\n", message);
546 exit( 111 ); 552 exit(111);
547} 553}
548 554
549void trackerlogic_init( ) { 555void trackerlogic_init() {
550 g_tracker_id = random(); 556 g_tracker_id = random();
551 557
552 if( !g_stats_path ) 558 if (!g_stats_path)
553 g_stats_path = "stats"; 559 g_stats_path = "stats";
554 g_stats_path_len = strlen( g_stats_path ); 560 g_stats_path_len = strlen(g_stats_path);
555 561
556 /* Initialise background worker threads */ 562 /* Initialise background worker threads */
557 mutex_init( ); 563 mutex_init();
558 clean_init( ); 564 clean_init();
559 fullscrape_init( ); 565 fullscrape_init();
560 accesslist_init( ); 566 accesslist_init();
561 livesync_init( ); 567 livesync_init();
562 stats_init( ); 568 stats_init();
563} 569}
564 570
565void trackerlogic_deinit( void ) { 571void trackerlogic_deinit(void) {
566 int bucket, delta_torrentcount = 0; 572 int bucket, delta_torrentcount = 0;
567 size_t j; 573 size_t j;
568 574
569 /* Free all torrents... */ 575 /* Free all torrents... */
570 for(bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 576 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
571 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 577 ot_vector *torrents_list = mutex_bucket_lock(bucket);
572 if( torrents_list->size ) { 578 if (torrents_list->size) {
573 for( j=0; j<torrents_list->size; ++j ) { 579 for (j = 0; j < torrents_list->size; ++j) {
574 ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + j; 580 ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + j;
575 free_peerlist( torrent->peer_list6 ); 581 free_peerlist(torrent->peer_list6);
576 free_peerlist( torrent->peer_list4 ); 582 free_peerlist(torrent->peer_list4);
577 delta_torrentcount -= 1; 583 delta_torrentcount -= 1;
578 } 584 }
579 free( torrents_list->data ); 585 free(torrents_list->data);
580 } 586 }
581 mutex_bucket_unlock( bucket, delta_torrentcount ); 587 mutex_bucket_unlock(bucket, delta_torrentcount);
582 } 588 }
583 589
584 /* Deinitialise background worker threads */ 590 /* Deinitialise background worker threads */
585 stats_deinit( ); 591 stats_deinit();
586 livesync_deinit( ); 592 livesync_deinit();
587 accesslist_deinit( ); 593 accesslist_deinit();
588 fullscrape_deinit( ); 594 fullscrape_deinit();
589 clean_deinit( ); 595 clean_deinit();
590 /* Release mutexes */ 596 /* Release mutexes */
591 mutex_deinit( ); 597 mutex_deinit();
592} 598}
593 599
594const char *g_version_trackerlogic_c = "$Source$: $Revision$\n"; 600const char *g_version_trackerlogic_c = "$Source$: $Revision$\n";
diff --git a/trackerlogic.h b/trackerlogic.h
index e43fcdb..022184d 100644
--- a/trackerlogic.h
+++ b/trackerlogic.h
@@ -6,11 +6,11 @@
6#ifndef OT_TRACKERLOGIC_H__ 6#ifndef OT_TRACKERLOGIC_H__
7#define OT_TRACKERLOGIC_H__ 7#define OT_TRACKERLOGIC_H__
8 8
9#include <sys/types.h>
10#include <sys/time.h>
11#include <time.h>
12#include <stdint.h> 9#include <stdint.h>
13#include <stdlib.h> 10#include <stdlib.h>
11#include <sys/time.h>
12#include <sys/types.h>
13#include <time.h>
14 14
15#if defined(__linux__) && defined(WANT_ARC4RANDOM) 15#if defined(__linux__) && defined(WANT_ARC4RANDOM)
16#include <bsd/stdlib.h> 16#include <bsd/stdlib.h>
@@ -22,73 +22,76 @@
22typedef uint8_t ot_hash[20]; 22typedef uint8_t ot_hash[20];
23typedef time_t ot_time; 23typedef time_t ot_time;
24typedef char ot_ip6[16]; 24typedef char ot_ip6[16];
25typedef struct { ot_ip6 address; int bits; } 25typedef struct {
26 ot_net; 26 ot_ip6 address;
27 int bits;
28} ot_net;
27/* List of peers should fit in a single UDP packet (around 1200 bytes) */ 29/* List of peers should fit in a single UDP packet (around 1200 bytes) */
28#define OT_MAX_PEERS_UDP6 66 30#define OT_MAX_PEERS_UDP6 66
29#define OT_MAX_PEERS_UDP4 200 31#define OT_MAX_PEERS_UDP4 200
30 32
31#define OT_IP_SIZE6 16 33#define OT_IP_SIZE6 16
32#define OT_IP_SIZE4 4 34#define OT_IP_SIZE4 4
33#define OT_PORT_SIZE 2 35#define OT_PORT_SIZE 2
34#define OT_FLAG_SIZE 1 36#define OT_FLAG_SIZE 1
35#define OT_TIME_SIZE 1 37#define OT_TIME_SIZE 1
36 38
37/* Some tracker behaviour tunable */ 39/* Some tracker behaviour tunable */
38#define OT_CLIENT_TIMEOUT 30 40#define OT_CLIENT_TIMEOUT 30
39#define OT_CLIENT_TIMEOUT_CHECKINTERVAL 10 41#define OT_CLIENT_TIMEOUT_CHECKINTERVAL 10
40#define OT_CLIENT_TIMEOUT_SEND (60*15) 42#define OT_CLIENT_TIMEOUT_SEND (60 * 15)
41#define OT_CLIENT_REQUEST_INTERVAL (60*30) 43#define OT_CLIENT_REQUEST_INTERVAL (60 * 30)
42#define OT_CLIENT_REQUEST_VARIATION (60*6) 44#define OT_CLIENT_REQUEST_VARIATION (60 * 6)
43 45
44#define OT_TORRENT_TIMEOUT_HOURS 24 46#define OT_TORRENT_TIMEOUT_HOURS 24
45#define OT_TORRENT_TIMEOUT (60*OT_TORRENT_TIMEOUT_HOURS) 47#define OT_TORRENT_TIMEOUT (60 * OT_TORRENT_TIMEOUT_HOURS)
46 48
47#define OT_CLIENT_REQUEST_INTERVAL_RANDOM ( OT_CLIENT_REQUEST_INTERVAL - OT_CLIENT_REQUEST_VARIATION/2 + (int)( nrand48(ws->rand48_state) % OT_CLIENT_REQUEST_VARIATION ) ) 49#define OT_CLIENT_REQUEST_INTERVAL_RANDOM \
50 (OT_CLIENT_REQUEST_INTERVAL - OT_CLIENT_REQUEST_VARIATION / 2 + (int)(nrand48(ws->rand48_state) % OT_CLIENT_REQUEST_VARIATION))
48 51
49/* If WANT_MODEST_FULLSCRAPES is on, ip addresses may not 52/* If WANT_MODEST_FULLSCRAPES is on, ip addresses may not
50 fullscrape more frequently than this amount in seconds */ 53 fullscrape more frequently than this amount in seconds */
51#define OT_MODEST_PEER_TIMEOUT (60*5) 54#define OT_MODEST_PEER_TIMEOUT (60 * 5)
52 55
53/* If peers come back before 10 minutes, don't live sync them */ 56/* If peers come back before 10 minutes, don't live sync them */
54#define OT_CLIENT_SYNC_RENEW_BOUNDARY 10 57#define OT_CLIENT_SYNC_RENEW_BOUNDARY 10
55 58
56/* Number of tracker admin ip addresses allowed */ 59/* Number of tracker admin ip addresses allowed */
57#define OT_ADMINIP_MAX 64 60#define OT_ADMINIP_MAX 64
58#define OT_MAX_THREADS 64 61#define OT_MAX_THREADS 64
59 62
60/* Number of minutes after announce before peer is removed */ 63/* Number of minutes after announce before peer is removed */
61#define OT_PEER_TIMEOUT 45 64#define OT_PEER_TIMEOUT 45
62 65
63/* We maintain a list of 1024 pointers to sorted list of ot_torrent structs 66/* We maintain a list of 1024 pointers to sorted list of ot_torrent structs
64 Sort key is, of course, its hash */ 67 Sort key is, of course, its hash */
65#define OT_BUCKET_COUNT_BITS 10 68#define OT_BUCKET_COUNT_BITS 10
66 69
67#define OT_BUCKET_COUNT (1<<OT_BUCKET_COUNT_BITS) 70#define OT_BUCKET_COUNT (1 << OT_BUCKET_COUNT_BITS)
68#define OT_BUCKET_COUNT_SHIFT (32-OT_BUCKET_COUNT_BITS) 71#define OT_BUCKET_COUNT_SHIFT (32 - OT_BUCKET_COUNT_BITS)
69 72
70/* if _DEBUG_RANDOMTORRENTS is set, this is the amount of torrents to create 73/* if _DEBUG_RANDOMTORRENTS is set, this is the amount of torrents to create
71 on startup */ 74 on startup */
72#define RANDOMTORRENTS (1024*1024*1) 75#define RANDOMTORRENTS (1024 * 1024 * 1)
73 76
74/* From opentracker.c */ 77/* From opentracker.c */
75extern time_t g_now_seconds; 78extern time_t g_now_seconds;
76extern volatile int g_opentracker_running; 79extern volatile int g_opentracker_running;
77#define g_now_minutes (g_now_seconds/60) 80#define g_now_minutes (g_now_seconds / 60)
78 81
79extern uint32_t g_tracker_id; 82extern uint32_t g_tracker_id;
80typedef enum { FLAG_TCP, FLAG_UDP, FLAG_MCA, FLAG_SELFPIPE } PROTO_FLAG; 83typedef enum { FLAG_TCP, FLAG_UDP, FLAG_MCA, FLAG_SELFPIPE } PROTO_FLAG;
81 84
82#define OT_PEER_COMPARE_SIZE6 ((OT_IP_SIZE6)+(OT_PORT_SIZE)) 85#define OT_PEER_COMPARE_SIZE6 ((OT_IP_SIZE6) + (OT_PORT_SIZE))
83#define OT_PEER_COMPARE_SIZE4 ((OT_IP_SIZE4)+(OT_PORT_SIZE)) 86#define OT_PEER_COMPARE_SIZE4 ((OT_IP_SIZE4) + (OT_PORT_SIZE))
84#define OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(PEER_SIZE) ((PEER_SIZE)-(OT_TIME_SIZE)-(OT_FLAG_SIZE)) 87#define OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(PEER_SIZE) ((PEER_SIZE) - (OT_TIME_SIZE) - (OT_FLAG_SIZE))
85 88
86#define OT_PEER_SIZE6 ((OT_TIME_SIZE)+(OT_FLAG_SIZE)+(OT_PEER_COMPARE_SIZE6)) 89#define OT_PEER_SIZE6 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE6))
87#define OT_PEER_SIZE4 ((OT_TIME_SIZE)+(OT_FLAG_SIZE)+(OT_PEER_COMPARE_SIZE4)) 90#define OT_PEER_SIZE4 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE4))
88 91
89typedef uint8_t ot_peer; /* Generic pointer to a v6 or v4 peer */ 92typedef uint8_t ot_peer; /* Generic pointer to a v6 or v4 peer */
90typedef uint8_t ot_peer6[OT_PEER_SIZE6]; 93typedef uint8_t ot_peer6[OT_PEER_SIZE6];
91typedef uint8_t ot_peer4[OT_PEER_SIZE4]; 94typedef uint8_t ot_peer4[OT_PEER_SIZE4];
92static const uint8_t PEER_FLAG_SEEDING = 0x80; 95static const uint8_t PEER_FLAG_SEEDING = 0x80;
93static const uint8_t PEER_FLAG_COMPLETED = 0x40; 96static const uint8_t PEER_FLAG_COMPLETED = 0x40;
94static const uint8_t PEER_FLAG_STOPPED = 0x20; 97static const uint8_t PEER_FLAG_STOPPED = 0x20;
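Two of the constant groups above are easier to see with a little arithmetic. The UDP peer limits keep a compact reply near the packet budget mentioned in the comment: 66 * 18 = 1188 bytes for IPv6 peers and 200 * 6 = 1200 bytes for IPv4 peers, assuming the usual compact address-plus-port encoding. And with OT_BUCKET_COUNT_BITS = 10, a bucket index is the top 10 bits of the first 32 bits of an info-hash; the sketch below shows that mapping as a standalone helper (the function name is made up, and this is presumably what mutex_bucket_lock_by_hash() boils down to in ot_mutex.c):

#include <stdint.h>

#define OT_BUCKET_COUNT_BITS 10
#define OT_BUCKET_COUNT (1 << OT_BUCKET_COUNT_BITS)
#define OT_BUCKET_COUNT_SHIFT (32 - OT_BUCKET_COUNT_BITS)

/* Reads the first four hash bytes big-endian and keeps the top
   OT_BUCKET_COUNT_BITS bits, giving an index in 0 .. OT_BUCKET_COUNT - 1. */
static unsigned bucket_from_hash(const uint8_t hash[20]) {
  uint32_t prefix = ((uint32_t)hash[0] << 24) | ((uint32_t)hash[1] << 16) |
                    ((uint32_t)hash[2] << 8) | (uint32_t)hash[3];
  return prefix >> OT_BUCKET_COUNT_SHIFT;
}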
@@ -96,20 +99,20 @@ static const uint8_t PEER_FLAG_FROM_SYNC = 0x10;
96static const uint8_t PEER_FLAG_LEECHING = 0x00; 99static const uint8_t PEER_FLAG_LEECHING = 0x00;
97 100
98/* Takes an ot_peer6 and returns the proper pointer to the peer and sets peer_size */ 101/* Takes an ot_peer6 and returns the proper pointer to the peer and sets peer_size */
99ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size); 102ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size);
100size_t peer_size_from_peer6(ot_peer6 *peer); 103size_t peer_size_from_peer6(ot_peer6 *peer);
101 104
102/* New style */ 105/* New style */
103#define OT_SETIP(peer,ip) memcpy((uint8_t*)(peer),(ip),OT_IP_SIZE6) 106#define OT_SETIP(peer, ip) memcpy((uint8_t *)(peer), (ip), OT_IP_SIZE6)
104#define OT_SETPORT(peer,port) memcpy(((uint8_t*)(peer))+(OT_IP_SIZE6),(port),2) 107#define OT_SETPORT(peer, port) memcpy(((uint8_t *)(peer)) + (OT_IP_SIZE6), (port), 2)
105#define OT_PEERFLAG(peer) (((uint8_t*)(peer))[(OT_IP_SIZE6)+2]) 108#define OT_PEERFLAG(peer) (((uint8_t *)(peer))[(OT_IP_SIZE6) + 2])
106#define OT_PEERFLAG_D(peer,peersize) (((uint8_t*)(peer))[(peersize)-2]) 109#define OT_PEERFLAG_D(peer, peersize) (((uint8_t *)(peer))[(peersize) - 2])
107#define OT_PEERTIME(peer,peersize) (((uint8_t*)(peer))[(peersize)-1]) 110#define OT_PEERTIME(peer, peersize) (((uint8_t *)(peer))[(peersize) - 1])
108 111
109#define PEERS_BENCODED6 "6:peers6" 112#define PEERS_BENCODED6 "6:peers6"
110#define PEERS_BENCODED4 "5:peers" 113#define PEERS_BENCODED4 "5:peers"
111 114
112#define OT_HASH_COMPARE_SIZE (sizeof(ot_hash)) 115#define OT_HASH_COMPARE_SIZE (sizeof(ot_hash))
113 116
114struct ot_peerlist; 117struct ot_peerlist;
115typedef struct ot_peerlist ot_peerlist; 118typedef struct ot_peerlist ot_peerlist;
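The accessor macros above pin down the wire layout of a peer entry: 16 address bytes at offset 0, the 2-byte port at offset 16, the flag byte right behind it and the time byte last. A hypothetical usage sketch, assuming trackerlogic.h is included and that ip and port are already in network byte order (the helper name is made up):

#include <stdint.h>
#include <string.h>
/* requires trackerlogic.h for ot_peer6, ot_ip6, OT_SETIP and friends */

static void example_fill_peer(ot_peer6 *peer, ot_ip6 ip, uint16_t port_be) {
  memset(peer, 0, sizeof(*peer));         /* also clears the OT_PEERTIME byte */
  OT_SETIP(peer, ip);                     /* 16 address bytes at offset 0 */
  OT_SETPORT(peer, &port_be);             /* 2 port bytes at offset 16 */
  OT_PEERFLAG(peer) = PEER_FLAG_LEECHING; /* flag byte at offset 18 */
}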
@@ -122,26 +125,26 @@ typedef struct {
122#include "ot_vector.h" 125#include "ot_vector.h"
123 126
124struct ot_peerlist { 127struct ot_peerlist {
125 ot_time base; 128 ot_time base;
126 size_t seed_count; 129 size_t seed_count;
127 size_t peer_count; 130 size_t peer_count;
128 size_t down_count; 131 size_t down_count;
129/* normal peers vector or 132 /* normal peers vector or
130 pointer to ot_vector[32] buckets if data != NULL and space == 0 133 pointer to ot_vector[32] buckets if data != NULL and space == 0
131*/ 134 */
132 ot_vector peers; 135 ot_vector peers;
133}; 136};
134#define OT_PEERLIST_HASBUCKETS(peer_list) ((peer_list)->peers.size > (peer_list)->peers.space) 137#define OT_PEERLIST_HASBUCKETS(peer_list) ((peer_list)->peers.size > (peer_list)->peers.space)
135 138
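OT_PEERLIST_HASBUCKETS() above is what tells the two shapes of the peers member apart: a flat vector, or (when data != NULL and space == 0) a pointer to 32 ot_vector sub-buckets as described in the comment. A traversal sketch that honours both shapes, assuming trackerlogic.h and ot_vector.h are included; the helper name is made up and the hard-coded 32 comes from the comment, not from a constant verified in ot_vector.h:

/* Purely illustrative: counts entries in either representation. */
static size_t example_count_peers(ot_peerlist *peer_list) {
  ot_vector *bucket_list;
  size_t i, total = 0;

  if (!OT_PEERLIST_HASBUCKETS(peer_list))
    return peer_list->peers.size;

  bucket_list = (ot_vector *)peer_list->peers.data;
  for (i = 0; i < 32; ++i) /* "ot_vector[32] buckets", per the comment above */
    total += bucket_list[i].size;
  return total;
}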
136struct ot_workstruct { 139struct ot_workstruct {
137 /* Thread specific, static */ 140 /* Thread specific, static */
138 char *inbuf; 141 char *inbuf;
139#define G_INBUF_SIZE 8192 142#define G_INBUF_SIZE 8192
140 char *outbuf; 143 char *outbuf;
141#define G_OUTBUF_SIZE 8192 144#define G_OUTBUF_SIZE 8192
142#ifdef _DEBUG_HTTPERROR 145#ifdef _DEBUG_HTTPERROR
143 char *debugbuf; 146 char *debugbuf;
144#define G_DEBUGBUF_SIZE 8192 147#define G_DEBUGBUF_SIZE 8192
145#endif 148#endif
146 149
147 /* The peer currently in the working */ 150 /* The peer currently in the working */
@@ -174,34 +177,34 @@ struct ot_workstruct {
174#endif 177#endif
175 178
176#ifdef WANT_SYNC 179#ifdef WANT_SYNC
177#define WANT_SYNC_PARAM( param ) , param 180#define WANT_SYNC_PARAM(param) , param
178#else 181#else
179#define WANT_SYNC_PARAM( param ) 182#define WANT_SYNC_PARAM(param)
180#endif 183#endif
181 184
182#ifdef WANT_LOG_NETWORKS 185#ifdef WANT_LOG_NETWORKS
183#error Live logging networks disabled at the moment. 186#error Live logging networks disabled at the moment.
184#endif 187#endif
185 188
186void trackerlogic_init( void ); 189void trackerlogic_init(void);
187void trackerlogic_deinit( void ); 190void trackerlogic_deinit(void);
188void exerr( char * message ); 191void exerr(char *message);
189 192
190/* add_peer_to_torrent does only release the torrent bucket if from_sync is set, 193/* add_peer_to_torrent does only release the torrent bucket if from_sync is set,
191 otherwise it is released in return_peers_for_torrent */ 194 otherwise it is released in return_peers_for_torrent */
192size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount ); 195size_t add_peer_to_torrent_and_return_peers(PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount);
193size_t remove_peer_from_torrent( PROTO_FLAG proto, struct ot_workstruct *ws ); 196size_t remove_peer_from_torrent(PROTO_FLAG proto, struct ot_workstruct *ws);
194size_t return_tcp_scrape_for_torrent( ot_hash const *hash_list, int amount, char *reply ); 197size_t return_tcp_scrape_for_torrent(ot_hash const *hash_list, int amount, char *reply);
195size_t return_udp_scrape_for_torrent( ot_hash const hash, char *reply ); 198size_t return_udp_scrape_for_torrent(ot_hash const hash, char *reply);
196void add_torrent_from_saved_state( ot_hash const hash, ot_time base, size_t down_count ); 199void add_torrent_from_saved_state(ot_hash const hash, ot_time base, size_t down_count);
197#ifdef _DEBUG_RANDOMTORRENTS 200#ifdef _DEBUG_RANDOMTORRENTS
198void trackerlogic_add_random_torrents(size_t amount); 201void trackerlogic_add_random_torrents(size_t amount);
199#endif 202#endif
200 203
201/* torrent iterator */ 204/* torrent iterator */
202void iterate_all_torrents( int (*for_each)( ot_torrent* torrent, uintptr_t data ), uintptr_t data ); 205void iterate_all_torrents(int (*for_each)(ot_torrent *torrent, uintptr_t data), uintptr_t data);
203 206
204/* Helper, before it moves to its own object */ 207/* Helper, before it moves to its own object */
205void free_peerlist( ot_peerlist *peer_list ); 208void free_peerlist(ot_peerlist *peer_list);
206 209
207#endif 210#endif