Diffstat (limited to 'trackerlogic.c')
-rw-r--r--  trackerlogic.c  291
1 files changed, 134 insertions, 157 deletions
diff --git a/trackerlogic.c b/trackerlogic.c
index 0aca287..faca19b 100644
--- a/trackerlogic.c
+++ b/trackerlogic.c
@@ -7,17 +7,12 @@
 #include <stdlib.h>
 #include <string.h>
 #include <stdio.h>
-#include <sys/uio.h>
 #include <arpa/inet.h>
-#include <sys/types.h>
-#include <sys/mman.h>
 #include <unistd.h>
-#include <time.h>
-#include <math.h>
 #include <errno.h>
+#include <stdint.h>
 
 /* Libowfat */
-#include "scan.h"
 #include "byte.h"
 #include "io.h"
 
@@ -28,26 +23,26 @@
 #include "ot_clean.h"
 #include "ot_accesslist.h"
 #include "ot_fullscrape.h"
-#include "ot_sync.h"
 #include "ot_livesync.h"
 
 void free_peerlist( ot_peerlist *peer_list ) {
-  size_t i;
-  for( i=0; i<OT_POOLS_COUNT; ++i )
-    if( peer_list->peers[i].data )
-      free( peer_list->peers[i].data );
-#ifdef WANT_SYNC_BATCH
-  free( peer_list->changeset.data );
-#endif
+  if( peer_list->peers.data ) {
+    if( OT_PEERLIST_HASBUCKETS( peer_list ) ) {
+      ot_vector *bucket_list = (ot_vector*)(peer_list->peers.data);
+
+      while( peer_list->peers.size-- )
+        free( bucket_list++->data );
+    }
+    free( peer_list->peers.data );
+  }
   free( peer_list );
 }
 
-ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM( int from_changeset ) ) {
+ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM( int from_sync ) ) {
   int exactmatch;
   ot_torrent *torrent;
   ot_peer *peer_dest;
-  ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ), *peer_pool;
-  int base_pool = 0;
+  ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash );
 
   if( !accesslist_hashisvalid( hash ) ) {
     mutex_bucket_unlock_by_hash( hash );
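Note on the rewritten free_peerlist() above: a peer list now owns a single ot_vector; when the list has not been split into buckets, peers.data is the payload itself, and when OT_PEERLIST_HASBUCKETS() holds, peers.data points at an array of per-bucket ot_vectors whose payloads must be freed before the array. A minimal standalone sketch of that flat-vs-bucketed teardown pattern, using stand-in types (my_vector and my_peerlist are illustrative, not the real ot_vector / ot_peerlist from trackerlogic.h):

#include <stdlib.h>

typedef struct {
  void  *data;   /* payload, or an array of my_vector when bucketed */
  size_t size;   /* number of payload entries, or number of buckets */
} my_vector;

typedef struct {
  my_vector peers;
  int       has_buckets;
} my_peerlist;

static void my_free_peerlist( my_peerlist *peer_list ) {
  if( peer_list->peers.data ) {
    if( peer_list->has_buckets ) {
      my_vector *bucket_list = (my_vector*)peer_list->peers.data;
      /* free each bucket's own payload first ... */
      while( peer_list->peers.size-- )
        free( bucket_list++->data );
    }
    /* ... then the flat payload or the bucket array itself */
    free( peer_list->peers.data );
  }
  free( peer_list );
}

int main( void ) {
  my_peerlist *pl = calloc( 1, sizeof( my_peerlist ) );
  if( !pl ) return 1;
  pl->peers.data = malloc( 6 * 16 );   /* flat list: room for 16 six-byte peers */
  pl->peers.size = 16;
  my_free_peerlist( pl );              /* frees the payload, then the list */
  return 0;
}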
@@ -75,106 +70,135 @@ ot_torrent *add_peer_to_torrent( ot_hash *hash, ot_peer *peer WANT_SYNC_PARAM(
     clean_single_torrent( torrent );
 
   /* Timestamp our first pool */
-  torrent->peer_list->base = NOW;
+  torrent->peer_list->base = g_now_minutes;
+
+  /* Check for peer in torrent */
+  peer_dest = vector_find_or_insert_peer( &(torrent->peer_list->peers), peer, &exactmatch );
+  if( !peer_dest ) {
+    mutex_bucket_unlock_by_hash( hash );
+    return NULL;
+  }
+
+  /* Tell peer that it's fresh */
+  OT_PEERTIME( peer ) = 0;
 
   /* Sanitize flags: Whoever claims to have completed download, must be a seeder */
   if( ( OT_FLAG( peer ) & ( PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING ) ) == PEER_FLAG_COMPLETED )
     OT_FLAG( peer ) ^= PEER_FLAG_COMPLETED;
 
-#ifdef WANT_SYNC
-  if( from_changeset ) {
-    /* Check, whether peer already is in current pool, do nothing if so */
-    peer_pool = &torrent->peer_list->peers[0];
-    binary_search( peer, peer_pool->data, peer_pool->size, sizeof(ot_peer), OT_PEER_COMPARE_SIZE, &exactmatch );
-    if( exactmatch ) {
-      mutex_bucket_unlock_by_hash( hash );
-      return torrent;
-    }
-    base_pool = 1;
-    if( torrent->peer_list->base < NOW )
-      torrent->peer_list->base = NOW;
-  }
-#endif
-
-  peer_pool = &torrent->peer_list->peers[ base_pool ];
-  peer_dest = vector_find_or_insert( peer_pool, (void*)peer, sizeof( ot_peer ), OT_PEER_COMPARE_SIZE, &exactmatch );
-
-  /* If we hadn't had a match in current pool, create peer there and
-     remove it from all older pools */
+  /* If we hadn't had a match create peer there */
   if( !exactmatch ) {
-    int i;
-    memmove( peer_dest, peer, sizeof( ot_peer ) );
-    torrent->peer_list->peer_count++;
 
 #ifdef WANT_SYNC_LIVE
-    if( !from_changeset )
-      livesync_tell( hash, peer, PEER_FLAG_LEECHING );
+    if( !from_sync )
+      livesync_tell( hash, peer );
 #endif
 
-    if( OT_FLAG( peer ) & PEER_FLAG_COMPLETED )
+    torrent->peer_list->peer_count++;
+    if( OT_FLAG(peer) & PEER_FLAG_COMPLETED )
       torrent->peer_list->down_count++;
-
-    if( OT_FLAG(peer) & PEER_FLAG_SEEDING ) {
-      torrent->peer_list->seed_counts[ base_pool ]++;
+    if( OT_FLAG(peer) & PEER_FLAG_SEEDING )
      torrent->peer_list->seed_count++;
-    }
 
-    for( i= base_pool + 1; i<OT_POOLS_COUNT; ++i ) {
-      switch( vector_remove_peer( &torrent->peer_list->peers[i], peer, 0 ) ) {
-        case 0: continue;
-        case 2: torrent->peer_list->seed_counts[i]--;
-                torrent->peer_list->seed_count--;
-        case 1: default:
-          torrent->peer_list->peer_count--;
-          mutex_bucket_unlock_by_hash( hash );
-          stats_issue_event( EVENT_RENEW, 0, i );
-          return torrent;
-      }
-    }
   } else {
-    if( (OT_FLAG(peer_dest) & PEER_FLAG_SEEDING ) && !(OT_FLAG(peer) & PEER_FLAG_SEEDING ) ) {
-      torrent->peer_list->seed_counts[ base_pool ]--;
+    stats_issue_event( EVENT_RENEW, 0, OT_PEERTIME( peer_dest ) );
+
+    if( (OT_FLAG(peer_dest) & PEER_FLAG_SEEDING ) && !(OT_FLAG(peer) & PEER_FLAG_SEEDING ) )
       torrent->peer_list->seed_count--;
-    }
-    if( !(OT_FLAG(peer_dest) & PEER_FLAG_SEEDING ) && (OT_FLAG(peer) & PEER_FLAG_SEEDING ) ) {
-      torrent->peer_list->seed_counts[ base_pool ]++;
+    if( !(OT_FLAG(peer_dest) & PEER_FLAG_SEEDING ) && (OT_FLAG(peer) & PEER_FLAG_SEEDING ) )
      torrent->peer_list->seed_count++;
-    }
-    if( !(OT_FLAG( peer_dest ) & PEER_FLAG_COMPLETED ) && (OT_FLAG( peer ) & PEER_FLAG_COMPLETED ) )
+    if( !(OT_FLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_FLAG(peer) & PEER_FLAG_COMPLETED ) )
      torrent->peer_list->down_count++;
-    if( OT_FLAG( peer_dest ) & PEER_FLAG_COMPLETED )
+    if( OT_FLAG(peer_dest) & PEER_FLAG_COMPLETED )
      OT_FLAG( peer ) |= PEER_FLAG_COMPLETED;
-
-    stats_issue_event( EVENT_RENEW, 0, base_pool );
-    memmove( peer_dest, peer, sizeof( ot_peer ) );
   }
 
-  mutex_bucket_unlock_by_hash( hash );
+  *(uint64_t*)(peer_dest) = *(uint64_t*)(peer);
+#ifdef WANT_SYNC
+  /* In order to avoid an unlock/lock between add_peers and return_peers,
+     we only unlock the bucket if return_peers won't do the job: either
+     if we return NULL or if no reply is expected, i.e. when called
+     from livesync code. */
+  if( from_sync )
+    mutex_bucket_unlock_by_hash( hash );
+#endif
   return torrent;
 }
 
+static size_t return_peers_all( ot_peerlist *peer_list, char *reply ) {
+  unsigned int bucket, num_buckets = 1;
+  ot_vector * bucket_list = &peer_list->peers;
+  char * r = reply;
+
+  if( OT_PEERLIST_HASBUCKETS(peer_list) ) {
+    num_buckets = bucket_list->size;
+    bucket_list = (ot_vector *)bucket_list->data;
+  }
+
+  for( bucket = 0; bucket<num_buckets; ++bucket ) {
+    ot_peer * peers = (ot_peer*)bucket_list[bucket].data;
+    size_t peer_count = bucket_list[bucket].size;
+    while( peer_count-- )
+      memmove( r+=6, peers++, 6 );
+  }
+
+  return r - reply;
+}
+
+static size_t return_peers_selection( ot_peerlist *peer_list, size_t amount, char *reply ) {
+  unsigned int bucket_offset, bucket_index = 0, num_buckets = 1;
+  ot_vector * bucket_list = &peer_list->peers;
+  unsigned int shifted_pc = peer_list->peer_count;
+  unsigned int shifted_step = 0;
+  unsigned int shift = 0;
+  char * r = reply;
+
+  if( OT_PEERLIST_HASBUCKETS(peer_list) ) {
+    num_buckets = bucket_list->size;
+    bucket_list = (ot_vector *)bucket_list->data;
+  }
+
+  /* Make fixpoint arithmetic as exact as possible */
+#define MAXPRECBIT (1<<(8*sizeof(int)-3))
+  while( !(shifted_pc & MAXPRECBIT ) ) { shifted_pc <<= 1; shift++; }
+  shifted_step = shifted_pc/amount;
+#undef MAXPRECBIT
+
+  /* Initialize somewhere in the middle of peers so that
+     fixpoint's aliasing doesn't alway miss the same peers */
+  bucket_offset = random() % peer_list->peer_count;
+
+  while( amount-- ) {
+    /* This is the aliased, non shifted range, next value may fall into */
+    unsigned int diff = ( ( ( amount + 1 ) * shifted_step ) >> shift ) -
+                        ( ( amount * shifted_step ) >> shift );
+    bucket_offset += 1 + random() % diff;
+
+    while( bucket_offset >= bucket_list[bucket_index].size ) {
+      bucket_offset -= bucket_list[bucket_index].size;
+      bucket_index = ( bucket_index + 1 ) % num_buckets;
+    }
+
+    memmove( r, ((ot_peer*)bucket_list[bucket_index].data) + bucket_offset, 6 );
+    r += 6;
+  }
+  return r - reply;
+}
+
 /* Compiles a list of random peers for a torrent
  * reply must have enough space to hold 92+6*amount bytes
- * Selector function can be anything, maybe test for seeds, etc.
- * RANDOM may return huge values
  * does not yet check not to return self
+ * the bucket, torrent resides in has been locked by the
+   add_peer call, the ot_torrent * was gathered from, so we
+   have to unlock it here.
 */
-size_t return_peers_for_torrent( ot_hash *hash, size_t amount, char *reply, PROTO_FLAG proto ) {
-  char *r = reply;
-  int exactmatch;
-  ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash );
-  ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
+size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ) {
   ot_peerlist *peer_list = torrent->peer_list;
-  size_t index;
-
-  if( !torrent ) {
-    mutex_bucket_unlock_by_hash( hash );
-    return 0;
-  }
+  char *r = reply;
 
-  if( peer_list->peer_count < amount )
+  if( amount > peer_list->peer_count )
     amount = peer_list->peer_count;
 
   if( proto == FLAG_TCP )
     r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie5:peers%zd:", peer_list->seed_count, peer_list->down_count, peer_list->peer_count-peer_list->seed_count, OT_CLIENT_REQUEST_INTERVAL_RANDOM, 6*amount );
   else {
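The selection logic that used to live inline in return_peers_for_torrent() (removed in the next hunk) now sits in return_peers_selection() above. The idea of the fixpoint arithmetic: scale peer_count left until it occupies the top bits of an int, divide by amount to get a step with sub-integer precision, then advance by a random offset inside each step so the rounding ("aliasing") never systematically skips the same peers. Below is a self-contained sketch of just that index arithmetic, without the bucket walk; pick_indices() is an illustrative helper, not part of trackerlogic.c.

#include <stdio.h>
#include <stdlib.h>

/* Pick `amount` roughly evenly spaced indices out of `peer_count`, with
 * random jitter inside each step -- the same fixed-point scheme as
 * return_peers_selection(), minus the bucket walk. */
static void pick_indices( unsigned int peer_count, unsigned int amount ) {
  unsigned int shifted_pc = peer_count, shifted_step, shift = 0, offset;

  /* scale peer_count up so that shifted_pc / amount keeps more precision */
  while( !( shifted_pc & ( 1u << ( 8 * sizeof( int ) - 3 ) ) ) ) {
    shifted_pc <<= 1;
    shift++;
  }
  shifted_step = shifted_pc / amount;

  offset = random() % peer_count;          /* random starting point */
  while( amount-- ) {
    /* width of the current step after undoing the scaling; always >= 1
     * as long as amount <= peer_count */
    unsigned int diff = ( ( ( amount + 1 ) * shifted_step ) >> shift )
                      - ( (   amount       * shifted_step ) >> shift );
    offset = ( offset + 1 + random() % diff ) % peer_count;
    printf( "picked index %u\n", offset );
  }
}

int main( void ) {
  pick_indices( 1000, 8 );                 /* e.g. choose 8 of 1000 peers */
  return 0;
}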
@@ -185,40 +209,16 @@ size_t return_peers_for_torrent( ot_hash *hash, size_t amount, char *reply, PROT
   }
 
   if( amount ) {
-    unsigned int pool_offset, pool_index = 0;;
-    unsigned int shifted_pc = peer_list->peer_count;
-    unsigned int shifted_step = 0;
-    unsigned int shift = 0;
-
-    /* Make fixpoint arithmetic as exact as possible */
-#define MAXPRECBIT (1<<(8*sizeof(int)-3))
-    while( !(shifted_pc & MAXPRECBIT ) ) { shifted_pc <<= 1; shift++; }
-    shifted_step = shifted_pc/amount;
-#undef MAXPRECBIT
-
-    /* Initialize somewhere in the middle of peers so that
-       fixpoint's aliasing doesn't alway miss the same peers */
-    pool_offset = random() % peer_list->peer_count;
-
-    for( index = 0; index < amount; ++index ) {
-      /* This is the aliased, non shifted range, next value may fall into */
-      unsigned int diff = ( ( ( index + 1 ) * shifted_step ) >> shift ) -
-                          ( ( index * shifted_step ) >> shift );
-      pool_offset += 1 + random() % diff;
-
-      while( pool_offset >= peer_list->peers[pool_index].size ) {
-        pool_offset -= peer_list->peers[pool_index].size;
-        pool_index = ( pool_index + 1 ) % OT_POOLS_COUNT;
-      }
-
-      memmove( r, ((ot_peer*)peer_list->peers[pool_index].data) + pool_offset, 6 );
-      r += 6;
-    }
+    if( amount == peer_list->peer_count )
+      r += return_peers_all( peer_list, r );
+    else
+      r += return_peers_selection( peer_list, amount, r );
   }
+
   if( proto == FLAG_TCP )
     *r++ = 'e';
 
-  mutex_bucket_unlock_by_hash( hash );
+  mutex_bucket_unlock_by_hash( &torrent->hash );
   return r - reply;
 }
 
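Note the changed locking contract visible in this hunk and the previous one: return_peers_for_torrent() no longer looks the torrent up itself; it receives the ot_torrent * from add_peer_to_torrent() with the hash bucket still locked and releases it via mutex_bucket_unlock_by_hash( &torrent->hash ). A hedged sketch of how a caller would pair the two calls after this change; the announce handler itself is not part of this diff, and passing 0 through WANT_SYNC_PARAM mirrors how the macro is used in the declaration above (it only expands to an argument in WANT_SYNC builds).

#include "trackerlogic.h"   /* ot_hash, ot_peer, PROTO_FLAG, the two calls below */

/* Illustrative announce path, not code from this commit. */
static size_t handle_announce_sketch( ot_hash *hash, ot_peer *peer,
                                      size_t amount, char *reply, PROTO_FLAG proto ) {
  ot_torrent *torrent = add_peer_to_torrent( hash, peer WANT_SYNC_PARAM( 0 ) );
  if( !torrent )
    return 0;   /* add_peer_to_torrent() already unlocked the bucket on this path */
  /* the bucket is still locked here; return_peers_for_torrent() unlocks it */
  return return_peers_for_torrent( torrent, amount, reply, proto );
}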
@@ -274,64 +274,43 @@ size_t return_tcp_scrape_for_torrent( ot_hash *hash_list, int amount, char *repl
   return r - reply;
 }
 
+static ot_peerlist dummy_list;
 size_t remove_peer_from_torrent( ot_hash *hash, ot_peer *peer, char *reply, PROTO_FLAG proto ) {
   int exactmatch;
-  size_t index;
+  size_t reply_size = 0;
   ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash );
   ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
-  ot_peerlist *peer_list;
+  ot_peerlist *peer_list = &dummy_list;
 
 #ifdef WANT_SYNC_LIVE
-  if( proto != FLAG_MCA )
-    livesync_tell( hash, peer, PEER_FLAG_STOPPED );
-#endif
-
-  if( !exactmatch ) {
-    mutex_bucket_unlock_by_hash( hash );
-
-    if( proto == FLAG_TCP )
-      return sprintf( reply, "d8:completei0e10:incompletei0e8:intervali%ie5:peers0:e", OT_CLIENT_REQUEST_INTERVAL_RANDOM );
-
-    /* Create fake packet to satisfy parser on the other end */
-    if( proto == FLAG_UDP ) {
-      ((uint32_t*)reply)[2] = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM );
-      ((uint32_t*)reply)[3] = ((uint32_t*)reply)[4] = 0;
-      return (size_t)20;
-    }
-
-    if( proto == FLAG_MCA )
-      return 0;
+  if( proto != FLAG_MCA ) {
+    OT_FLAG( peer ) |= PEER_FLAG_STOPPED;
+    livesync_tell( hash, peer );
   }
+#endif
 
-  peer_list = torrent->peer_list;
-  for( index = 0; index<OT_POOLS_COUNT; ++index ) {
-    switch( vector_remove_peer( &peer_list->peers[index], peer, index == 0 ) ) {
-      case 0: continue;
-      case 2: peer_list->seed_counts[index]--;
-              peer_list->seed_count--;
-      case 1: default:
-        peer_list->peer_count--;
-        goto exit_loop;
+  if( exactmatch ) {
+    peer_list = torrent->peer_list;
+    switch( vector_remove_peer( &peer_list->peers, peer ) ) {
+      case 2: peer_list->seed_count--; /* Fall throughs intended */
+      case 1: peer_list->peer_count--; /* Fall throughs intended */
+      default: break;
     }
   }
 
-exit_loop:
-
-  if( proto == FLAG_TCP ) {
-    size_t reply_size = sprintf( reply, "d8:completei%zde10:incompletei%zde8:intervali%ie5:peers0:e", peer_list->seed_count, peer_list->peer_count - peer_list->seed_count, OT_CLIENT_REQUEST_INTERVAL_RANDOM );
-    mutex_bucket_unlock_by_hash( hash );
-    return reply_size;
-  }
+  if( proto == FLAG_TCP )
+    reply_size = sprintf( reply, "d8:completei%zde10:incompletei%zde8:intervali%ie5:peers0:e", peer_list->seed_count, peer_list->peer_count - peer_list->seed_count, OT_CLIENT_REQUEST_INTERVAL_RANDOM );
 
   /* Handle UDP reply */
   if( proto == FLAG_UDP ) {
     ((uint32_t*)reply)[2] = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM );
     ((uint32_t*)reply)[3] = htonl( peer_list->peer_count - peer_list->seed_count );
     ((uint32_t*)reply)[4] = htonl( peer_list->seed_count);
+    reply_size = 20;
   }
 
   mutex_bucket_unlock_by_hash( hash );
-  return (size_t)20;
+  return reply_size;
 }
 
 void exerr( char * message ) {
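The static dummy_list introduced above is what lets the rewritten remove_peer_from_torrent() drop the old special-case branches for unknown torrents: peer_list starts out pointing at a zero-initialized fallback, so the TCP/UDP reply code runs unconditionally and simply reports 0 seeders and 0 leechers when the hash was not found. A tiny standalone sketch of that fallback-object pattern (counts_t and format_reply() are illustrative names, not opentracker's):

#include <stdio.h>

typedef struct {
  size_t seed_count;
  size_t peer_count;
} counts_t;

/* static storage duration => guaranteed zero-initialized */
static counts_t dummy_counts;

static size_t format_reply( const counts_t *c, char *reply ) {
  /* works for a real entry and for the dummy alike; the dummy yields 0/0 */
  return (size_t)sprintf( reply, "complete %zu incomplete %zu",
                          c->seed_count, c->peer_count - c->seed_count );
}

int main( void ) {
  char buf[64];
  counts_t found = { 3, 10 };

  format_reply( &found, buf );
  printf( "%s\n", buf );          /* complete 3 incomplete 7 */

  format_reply( &dummy_counts, buf );
  printf( "%s\n", buf );          /* complete 0 incomplete 0 */
  return 0;
}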
@@ -354,7 +333,6 @@ int trackerlogic_init( const char * const serverdir ) {
   fullscrape_init( );
   accesslist_init( );
   livesync_init( );
-  sync_init( );
   stats_init( );
 
   return 0;
@@ -366,7 +344,6 @@ void trackerlogic_deinit( void ) {
 
   /* Deinitialise background worker threads */
   stats_deinit( );
-  sync_deinit( );
   livesync_init( );
   accesslist_init( );
   fullscrape_deinit( );
