path: root/trackerlogic.c
author    erdgeist <>  2010-04-22 22:08:42 +0000
committer erdgeist <>  2010-04-22 22:08:42 +0000
commit    d42bf5a0310b8df4babff645ee91c37e9f994bfe (patch)
tree      878712aeae9d2c64dc626b2945f11cac50954258 /trackerlogic.c
parent    ae9ab769415f30ccb444bb0b0190a5fcf22275e7 (diff)
** struct ot_workstruct gets richer (and will become even richer soon).
This is where we encapsulate all per-request data, from peer to hash to peer_id, so that it is available everywhere without passing hundreds of pointers down the stack. Most functions that do work down the stack now accept an ot_workstruct and some flags, so the struct can end up in the stats/event handler, where it will be the default parameter in the future.

** peer_id is now being copied by default and moved to ot_workstruct
So it is available in stats and subsequent functions.

** sync scrape madness is gone
SYNC_SCRAPE was intended to sync tracker state that would normally be lost on restarts, i.e. downloaded counts per torrent. The way was to push it into the tracker cloud after finding all neighbouring trackers. This is madness. It was never tested and can be done per tracker by fetching stats/mode=statedump from time to time and starting opentracker with the -l option later.

** livesync thread has its own ot_workstruct now
So it can behave like ot_udp and ot_http against trackerlogic.c and get rid of the first half of the embarrassing global variables. The sending half will be fixed soon [tm].

** stats can log completed events
The author recognizes the need of original content distributors to keep track of the number of times a work has been downloaded. While neither feasible nor used on openbittorrent and other open and anonymous tracker installations, a tracker user can now choose to send those events to syslog.
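For orientation, a minimal sketch of what such a per-request work struct might look like, with field names inferred from what this commit actually touches (ws->hash, ws->peer, ws->peer_id, ws->reply, ws->reply_size); the real definition lives in trackerlogic.h and may differ in types and carry more members:

/* Sketch only -- inferred from this commit, not the authoritative definition. */
struct ot_workstruct {
  /* per-request input, filled by the protocol front end */
  ot_hash   *hash;        /* info_hash the announce/scrape refers to */
  ot_peer    peer;        /* peer as parsed from the request */
  uint8_t   *peer_id;     /* now copied by default so stats can see it */

  /* per-request output, assembled down the stack */
  char      *reply;       /* reply buffer owned by the front end */
  size_t     reply_size;  /* bytes written into reply */
};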
Diffstat (limited to 'trackerlogic.c')
-rw-r--r--  trackerlogic.c  |  92
1 file changed, 45 insertions(+), 47 deletions(-)
diff --git a/trackerlogic.c b/trackerlogic.c
index 5348927..7ae9bb1 100644
--- a/trackerlogic.c
+++ b/trackerlogic.c
@@ -71,36 +71,35 @@ void add_torrent_from_saved_state( ot_hash hash, ot_time base, size_t down_count
   return mutex_bucket_unlock_by_hash( hash, 1 );
 }
 
-size_t add_peer_to_torrent_and_return_peers( ot_hash hash, ot_peer *peer, PROTO_FLAG proto, size_t amount, char * reply ) {
+size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount ) {
   int exactmatch, delta_torrentcount = 0;
-  size_t reply_size;
   ot_torrent *torrent;
   ot_peer *peer_dest;
-  ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash );
+  ot_vector *torrents_list = mutex_bucket_lock_by_hash( *ws->hash );
 
-  if( !accesslist_hashisvalid( hash ) ) {
-    mutex_bucket_unlock_by_hash( hash, 0 );
+  if( !accesslist_hashisvalid( *ws->hash ) ) {
+    mutex_bucket_unlock_by_hash( *ws->hash, 0 );
     if( proto == FLAG_TCP ) {
       const char invalid_hash[] = "d14:failure reason63:Requested download is not authorized for use with this tracker.e";
-      memcpy( reply, invalid_hash, strlen( invalid_hash ) );
+      memcpy( ws->reply, invalid_hash, strlen( invalid_hash ) );
       return strlen( invalid_hash );
     }
     return 0;
   }
 
-  torrent = vector_find_or_insert( torrents_list, (void*)hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
+  torrent = vector_find_or_insert( torrents_list, (void*)ws->hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
   if( !torrent ) {
-    mutex_bucket_unlock_by_hash( hash, 0 );
+    mutex_bucket_unlock_by_hash( *ws->hash, 0 );
     return 0;
   }
 
   if( !exactmatch ) {
     /* Create a new torrent entry, then */
-    memcpy( torrent->hash, hash, sizeof(ot_hash) );
+    memcpy( torrent->hash, *ws->hash, sizeof(ot_hash) );
 
     if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) {
       vector_remove_torrent( torrents_list, torrent );
-      mutex_bucket_unlock_by_hash( hash, 0 );
+      mutex_bucket_unlock_by_hash( *ws->hash, 0 );
       return 0;
     }
 
@@ -112,76 +111,76 @@ size_t add_peer_to_torrent_and_return_peers( ot_hash hash, ot_peer *peer, PROTO_
   torrent->peer_list->base = g_now_minutes;
 
   /* Check for peer in torrent */
-  peer_dest = vector_find_or_insert_peer( &(torrent->peer_list->peers), peer, &exactmatch );
+  peer_dest = vector_find_or_insert_peer( &(torrent->peer_list->peers), &ws->peer, &exactmatch );
   if( !peer_dest ) {
-    mutex_bucket_unlock_by_hash( hash, delta_torrentcount );
+    mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount );
     return 0;
   }
 
   /* Tell peer that it's fresh */
-  OT_PEERTIME( peer ) = 0;
+  OT_PEERTIME( &ws->peer ) = 0;
 
   /* Sanitize flags: Whoever claims to have completed download, must be a seeder */
-  if( ( OT_PEERFLAG( peer ) & ( PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING ) ) == PEER_FLAG_COMPLETED )
-    OT_PEERFLAG( peer ) ^= PEER_FLAG_COMPLETED;
+  if( ( OT_PEERFLAG( &ws->peer ) & ( PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING ) ) == PEER_FLAG_COMPLETED )
+    OT_PEERFLAG( &ws->peer ) ^= PEER_FLAG_COMPLETED;
 
   /* If we hadn't had a match create peer there */
   if( !exactmatch ) {
 
 #ifdef WANT_SYNC_LIVE
     if( proto == FLAG_MCA )
-      OT_PEERFLAG( peer ) |= PEER_FLAG_FROM_SYNC;
+      OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_FROM_SYNC;
     else
-      livesync_tell( hash, peer );
+      livesync_tell( ws );
 #endif
 
     torrent->peer_list->peer_count++;
-    if( OT_PEERFLAG(peer) & PEER_FLAG_COMPLETED ) {
+    if( OT_PEERFLAG(&ws->peer) & PEER_FLAG_COMPLETED ) {
       torrent->peer_list->down_count++;
-      stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)torrent->hash );
+      stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)ws );
     }
-    if( OT_PEERFLAG(peer) & PEER_FLAG_SEEDING )
+    if( OT_PEERFLAG(&ws->peer) & PEER_FLAG_SEEDING )
       torrent->peer_list->seed_count++;
 
   } else {
     stats_issue_event( EVENT_RENEW, 0, OT_PEERTIME( peer_dest ) );
 #ifdef WANT_SPOT_WOODPECKER
     if( ( OT_PEERTIME(peer_dest) > 0 ) && ( OT_PEERTIME(peer_dest) < 20 ) )
-      stats_issue_event( EVENT_WOODPECKER, 0, (uintptr_t)peer );
+      stats_issue_event( EVENT_WOODPECKER, 0, (uintptr_t)&ws->peer );
 #endif
 #ifdef WANT_SYNC_LIVE
     /* Won't live sync peers that come back too fast. Only exception:
        fresh "completed" reports */
     if( proto != FLAG_MCA ) {
       if( OT_PEERTIME( peer_dest ) > OT_CLIENT_SYNC_RENEW_BOUNDARY ||
-          ( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(peer) & PEER_FLAG_COMPLETED ) ) )
-        livesync_tell( hash, peer );
+          ( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(&ws->peer) & PEER_FLAG_COMPLETED ) ) )
+        livesync_tell( ws );
     }
 #endif
 
-    if( (OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && !(OT_PEERFLAG(peer) & PEER_FLAG_SEEDING ) )
+    if( (OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && !(OT_PEERFLAG(&ws->peer) & PEER_FLAG_SEEDING ) )
       torrent->peer_list->seed_count--;
-    if( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && (OT_PEERFLAG(peer) & PEER_FLAG_SEEDING ) )
+    if( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && (OT_PEERFLAG(&ws->peer) & PEER_FLAG_SEEDING ) )
       torrent->peer_list->seed_count++;
-    if( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(peer) & PEER_FLAG_COMPLETED ) ) {
+    if( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(&ws->peer) & PEER_FLAG_COMPLETED ) ) {
       torrent->peer_list->down_count++;
-      stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)torrent->hash );
+      stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)ws );
     }
     if( OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED )
-      OT_PEERFLAG( peer ) |= PEER_FLAG_COMPLETED;
+      OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_COMPLETED;
   }
 
-  memcpy( peer_dest, peer, sizeof(ot_peer) );
+  memcpy( peer_dest, &ws->peer, sizeof(ot_peer) );
 #ifdef WANT_SYNC
   if( proto == FLAG_MCA ) {
-    mutex_bucket_unlock_by_hash( hash, delta_torrentcount );
+    mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount );
     return 0;
   }
 #endif
 
-  reply_size = return_peers_for_torrent( torrent, amount, reply, proto );
-  mutex_bucket_unlock_by_hash( torrent->hash, delta_torrentcount );
-  return reply_size;
+  ws->reply_size = return_peers_for_torrent( torrent, amount, ws->reply, proto );
+  mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount );
+  return ws->reply_size;
 }
 
 static size_t return_peers_all( ot_peerlist *peer_list, char *reply ) {
@@ -350,23 +349,22 @@ size_t return_tcp_scrape_for_torrent( ot_hash *hash_list, int amount, char *repl
 }
 
 static ot_peerlist dummy_list;
-size_t remove_peer_from_torrent( ot_hash hash, ot_peer *peer, char *reply, PROTO_FLAG proto ) {
+size_t remove_peer_from_torrent( PROTO_FLAG proto, struct ot_workstruct *ws ) {
   int exactmatch;
-  size_t reply_size = 0;
-  ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash );
-  ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
+  ot_vector *torrents_list = mutex_bucket_lock_by_hash( *ws->hash );
+  ot_torrent *torrent = binary_search( ws->hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
   ot_peerlist *peer_list = &dummy_list;
 
 #ifdef WANT_SYNC_LIVE
   if( proto != FLAG_MCA ) {
-    OT_PEERFLAG( peer ) |= PEER_FLAG_STOPPED;
-    livesync_tell( hash, peer );
+    OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_STOPPED;
+    livesync_tell( ws );
   }
 #endif
 
   if( exactmatch ) {
     peer_list = torrent->peer_list;
-    switch( vector_remove_peer( &peer_list->peers, peer ) ) {
+    switch( vector_remove_peer( &peer_list->peers, &ws->peer ) ) {
       case 2: peer_list->seed_count--; /* Fall throughs intended */
       case 1: peer_list->peer_count--; /* Fall throughs intended */
       default: break;
@@ -375,19 +373,19 @@ size_t remove_peer_from_torrent( ot_hash hash, ot_peer *peer, char *reply, PROTO
 
   if( proto == FLAG_TCP ) {
     int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM;
-    reply_size = sprintf( reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie" PEERS_BENCODED "0:e", peer_list->seed_count, peer_list->peer_count - peer_list->seed_count, erval, erval / 2 );
+    ws->reply_size = sprintf( ws->reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie" PEERS_BENCODED "0:e", peer_list->seed_count, peer_list->peer_count - peer_list->seed_count, erval, erval / 2 );
   }
 
   /* Handle UDP reply */
   if( proto == FLAG_UDP ) {
-    ((uint32_t*)reply)[2] = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM );
-    ((uint32_t*)reply)[3] = htonl( peer_list->peer_count - peer_list->seed_count );
-    ((uint32_t*)reply)[4] = htonl( peer_list->seed_count);
-    reply_size = 20;
+    ((uint32_t*)ws->reply)[2] = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM );
+    ((uint32_t*)ws->reply)[3] = htonl( peer_list->peer_count - peer_list->seed_count );
+    ((uint32_t*)ws->reply)[4] = htonl( peer_list->seed_count);
+    ws->reply_size = 20;
   }
 
-  mutex_bucket_unlock_by_hash( hash, 0 );
-  return reply_size;
+  mutex_bucket_unlock_by_hash( *ws->hash, 0 );
+  return ws->reply_size;
 }
 
 void iterate_all_torrents( int (*for_each)( ot_torrent* torrent, uintptr_t data ), uintptr_t data ) {
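To illustrate the new calling convention, here is a hedged sketch of how a protocol front end (ot_http or ot_udp) might drive the two changed entry points after this commit. The workstruct field use and both signatures match this diff; the wrapper function, its name and its extra parameters are illustrative only and not part of the patch:

/* Illustrative only: the front end parses the request into ws once,
   then hands the whole workstruct down instead of separate pointers. */
static size_t handle_announce( PROTO_FLAG proto, struct ot_workstruct *ws, int stopped, size_t amount ) {
  /* ws->hash, ws->peer and ws->reply are assumed to have been filled by the request parser. */
  if( stopped )
    return remove_peer_from_torrent( proto, ws );                      /* event=stopped */
  return add_peer_to_torrent_and_return_peers( proto, ws, amount );    /* regular announce */
}

Either path leaves the assembled reply in ws->reply and returns ws->reply_size, so the caller no longer needs to track a separate reply buffer and size.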