summaryrefslogtreecommitdiff
path: root/ot_mutex.c
diff options
context:
space:
mode:
Diffstat (limited to 'ot_mutex.c')
-rw-r--r--ot_mutex.c190
1 file changed, 111 insertions, 79 deletions
diff --git a/ot_mutex.c b/ot_mutex.c
index 1212245..3011987 100644
--- a/ot_mutex.c
+++ b/ot_mutex.c
@@ -16,42 +16,39 @@
16#include "uint32.h" 16#include "uint32.h"
17 17
18/* Opentracker */ 18/* Opentracker */
19#include "trackerlogic.h" 19#include "ot_iovec.h"
20#include "ot_mutex.h" 20#include "ot_mutex.h"
21#include "ot_stats.h" 21#include "ot_stats.h"
22#include "trackerlogic.h"
22 23
23/* #define MTX_DBG( STRING ) fprintf( stderr, STRING ) */ 24/* #define MTX_DBG( STRING ) fprintf( stderr, STRING ) */
24#define MTX_DBG( STRING ) 25#define MTX_DBG(STRING)
25 26
26/* Our global all torrents list */ 27/* Our global all torrents list */
27static ot_vector all_torrents[OT_BUCKET_COUNT]; 28static ot_vector all_torrents[OT_BUCKET_COUNT];
28static pthread_mutex_t bucket_mutex[OT_BUCKET_COUNT]; 29static pthread_mutex_t bucket_mutex[OT_BUCKET_COUNT];
29static size_t g_torrent_count; 30static size_t g_torrent_count;
30 31
31/* Self pipe from opentracker.c */ 32/* Self pipe from opentracker.c */
32extern int g_self_pipe[2]; 33extern int g_self_pipe[2];
33 34
34ot_vector *mutex_bucket_lock( int bucket ) { 35ot_vector *mutex_bucket_lock(int bucket) {
35 pthread_mutex_lock(bucket_mutex + bucket ); 36 pthread_mutex_lock(bucket_mutex + bucket);
36 return all_torrents + bucket; 37 return all_torrents + bucket;
37} 38}
38 39
39ot_vector *mutex_bucket_lock_by_hash( ot_hash hash ) { 40ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash) { return mutex_bucket_lock(uint32_read_big((const char *)hash) >> OT_BUCKET_COUNT_SHIFT); }
40 return mutex_bucket_lock( uint32_read_big( (char*)hash ) >> OT_BUCKET_COUNT_SHIFT );
41}
42 41
43void mutex_bucket_unlock( int bucket, int delta_torrentcount ) { 42void mutex_bucket_unlock(int bucket, int delta_torrentcount) {
44 pthread_mutex_unlock(bucket_mutex + bucket); 43 pthread_mutex_unlock(bucket_mutex + bucket);
45 g_torrent_count += delta_torrentcount; 44 g_torrent_count += delta_torrentcount;
46} 45}
47 46
48void mutex_bucket_unlock_by_hash( ot_hash hash, int delta_torrentcount ) { 47void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount) {
49 mutex_bucket_unlock( uint32_read_big( (char*)hash ) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount ); 48 mutex_bucket_unlock(uint32_read_big((char *)hash) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount);
50} 49}
51 50
52size_t mutex_get_torrent_count( ) { 51size_t mutex_get_torrent_count() { return g_torrent_count; }
53 return g_torrent_count;
54}
55 52
56/* TaskQueue Magic */ 53/* TaskQueue Magic */
57 54
@@ -64,16 +61,16 @@ struct ot_task {
64 struct ot_task *next; 61 struct ot_task *next;
65}; 62};
66 63
67static ot_taskid next_free_taskid = 1; 64static ot_taskid next_free_taskid = 1;
68static struct ot_task *tasklist; 65static struct ot_task *tasklist;
69static pthread_mutex_t tasklist_mutex; 66static pthread_mutex_t tasklist_mutex;
70static pthread_cond_t tasklist_being_filled; 67static pthread_cond_t tasklist_being_filled;
71 68
72int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) { 69int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype) {
73 struct ot_task ** tmptask, * task; 70 struct ot_task **tmptask, *task;
74 71
75 task = malloc(sizeof( struct ot_task)); 72 task = malloc(sizeof(struct ot_task));
76 if( !task ) 73 if (!task)
77 return -1; 74 return -1;
78 75
79 task->taskid = 0; 76 task->taskid = 0;
@@ -84,98 +81,98 @@ int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) {
84 task->next = 0; 81 task->next = 0;
85 82
86 /* Want exclusive access to tasklist */ 83 /* Want exclusive access to tasklist */
87 pthread_mutex_lock( &tasklist_mutex ); 84 pthread_mutex_lock(&tasklist_mutex);
88 85
89 /* Skip to end of list */ 86 /* Skip to end of list */
90 tmptask = &tasklist; 87 tmptask = &tasklist;
91 while( *tmptask ) 88 while (*tmptask)
92 tmptask = &(*tmptask)->next; 89 tmptask = &(*tmptask)->next;
93 *tmptask = task; 90 *tmptask = task;
94 91
95 /* Inform waiting workers and release lock */ 92 /* Inform waiting workers and release lock */
96 pthread_cond_broadcast( &tasklist_being_filled ); 93 pthread_cond_broadcast(&tasklist_being_filled);
97 pthread_mutex_unlock( &tasklist_mutex ); 94 pthread_mutex_unlock(&tasklist_mutex);
98 return 0; 95 return 0;
99} 96}
100 97
101void mutex_workqueue_canceltask( int64 sock ) { 98void mutex_workqueue_canceltask(int64 sock) {
102 struct ot_task ** task; 99 struct ot_task **task;
103 100
104 /* Want exclusive access to tasklist */ 101 /* Want exclusive access to tasklist */
105 pthread_mutex_lock( &tasklist_mutex ); 102 pthread_mutex_lock(&tasklist_mutex);
106 103
107 for (task = &tasklist; *task; task = &((*task)->next)) 104 for (task = &tasklist; *task; task = &((*task)->next))
108 if ((*task)->sock == sock) { 105 if ((*task)->sock == sock) {
109 struct iovec *iovec = (*task)->iovec; 106 struct iovec *iovec = (*task)->iovec;
110 struct ot_task *ptask = *task; 107 struct ot_task *ptask = *task;
111 int i; 108 int i;
112 109
113 /* Free task's iovec */ 110 /* Free task's iovec */
114 for( i=0; i<(*task)->iovec_entries; ++i ) 111 for (i = 0; i < (*task)->iovec_entries; ++i)
115 free( iovec[i].iov_base ); 112 free(iovec[i].iov_base);
116 113
117 *task = (*task)->next; 114 *task = (*task)->next;
118 free( ptask ); 115 free(ptask);
119 break; 116 break;
120 } 117 }
121 118
122 /* Release lock */ 119 /* Release lock */
123 pthread_mutex_unlock( &tasklist_mutex ); 120 pthread_mutex_unlock(&tasklist_mutex);
124} 121}
125 122
126ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype ) { 123ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype) {
127 struct ot_task * task; 124 struct ot_task *task;
128 ot_taskid taskid = 0; 125 ot_taskid taskid = 0;
129 126
130 /* Want exclusive access to tasklist */ 127 /* Want exclusive access to tasklist */
131 pthread_mutex_lock( &tasklist_mutex ); 128 pthread_mutex_lock(&tasklist_mutex);
132 129
133 while( !taskid ) { 130 while (!taskid) {
134 /* Skip to the first unassigned task this worker wants to do */ 131 /* Skip to the first unassigned task this worker wants to do */
135 for (task = tasklist; task; task = task->next) 132 for (task = tasklist; task; task = task->next)
136 if (!task->taskid && ( TASK_CLASS_MASK & task->tasktype ) == *tasktype) { 133 if (!task->taskid && (TASK_CLASS_MASK & task->tasktype) == *tasktype) {
137 /* If we found an outstanding task, assign a taskid to it 134 /* If we found an outstanding task, assign a taskid to it
138 and leave the loop */ 135 and leave the loop */
139 task->taskid = taskid = ++next_free_taskid; 136 task->taskid = taskid = ++next_free_taskid;
140 *tasktype = task->tasktype; 137 *tasktype = task->tasktype;
141 break; 138 break;
142 } 139 }
143 140
144 /* Wait until the next task is being fed */ 141 /* Wait until the next task is being fed */
145 if (!taskid) 142 if (!taskid)
146 pthread_cond_wait( &tasklist_being_filled, &tasklist_mutex ); 143 pthread_cond_wait(&tasklist_being_filled, &tasklist_mutex);
147 } 144 }
148 145
149 /* Release lock */ 146 /* Release lock */
150 pthread_mutex_unlock( &tasklist_mutex ); 147 pthread_mutex_unlock(&tasklist_mutex);
151 148
152 return taskid; 149 return taskid;
153} 150}
154 151
155void mutex_workqueue_pushsuccess( ot_taskid taskid ) { 152void mutex_workqueue_pushsuccess(ot_taskid taskid) {
156 struct ot_task ** task; 153 struct ot_task **task;
157 154
158 /* Want exclusive access to tasklist */ 155 /* Want exclusive access to tasklist */
159 pthread_mutex_lock( &tasklist_mutex ); 156 pthread_mutex_lock(&tasklist_mutex);
160 157
161 for (task = &tasklist; *task; task = &((*task)->next)) 158 for (task = &tasklist; *task; task = &((*task)->next))
162 if ((*task)->taskid == taskid) { 159 if ((*task)->taskid == taskid) {
163 struct ot_task *ptask = *task; 160 struct ot_task *ptask = *task;
164 *task = (*task)->next; 161 *task = (*task)->next;
165 free( ptask ); 162 free(ptask);
166 break; 163 break;
167 } 164 }
168 165
169 /* Release lock */ 166 /* Release lock */
170 pthread_mutex_unlock( &tasklist_mutex ); 167 pthread_mutex_unlock(&tasklist_mutex);
171} 168}
172 169
173int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovec ) { 170int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovec) {
174 struct ot_task * task; 171 struct ot_task *task;
175 const char byte = 'o'; 172 const char byte = 'o';
176 173
177 /* Want exclusive access to tasklist */ 174 /* Want exclusive access to tasklist */
178 pthread_mutex_lock( &tasklist_mutex ); 175 pthread_mutex_lock(&tasklist_mutex);
179 176
180 for (task = tasklist; task; task = task->next) 177 for (task = tasklist; task; task = task->next)
181 if (task->taskid == taskid) { 178 if (task->taskid == taskid) {
@@ -186,55 +183,90 @@ int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iove
186 } 183 }
187 184
188 /* Release lock */ 185 /* Release lock */
189 pthread_mutex_unlock( &tasklist_mutex ); 186 pthread_mutex_unlock(&tasklist_mutex);
190 187
191 io_trywrite( g_self_pipe[1], &byte, 1 ); 188 io_trywrite(g_self_pipe[1], &byte, 1);
192 189
193 /* Indicate whether the worker has to throw away results */ 190 /* Indicate whether the worker has to throw away results */
194 return task ? 0 : -1; 191 return task ? 0 : -1;
195} 192}
196 193
197int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec ) { 194int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec) {
198 struct ot_task ** task; 195 struct ot_task *task;
199 int64 sock = -1; 196 const char byte = 'o';
200 197
201 /* Want exclusive access to tasklist */ 198 /* Want exclusive access to tasklist */
202 pthread_mutex_lock( &tasklist_mutex ); 199 pthread_mutex_lock(&tasklist_mutex);
203 200
204 for (task = &tasklist; *task; task = &((*task)->next)) 201 for (task = tasklist; task; task = task->next)
205 if ((*task)->tasktype == TASK_DONE) { 202 if (task->taskid == taskid) {
206 struct ot_task *ptask = *task; 203 if (iovec) {
204 if (iovec_append(&task->iovec_entries, &task->iovec, iovec))
205 task->tasktype = TASK_DONE_PARTIAL;
206 else
207 task = NULL;
208 } else
209 task->tasktype = TASK_DONE;
210 break;
211 }
207 212
208 *iovec_entries = (*task)->iovec_entries; 213 /* Release lock */
209 *iovec = (*task)->iovec; 214 pthread_mutex_unlock(&tasklist_mutex);
210 sock = (*task)->sock;
211 215
212 *task = (*task)->next; 216 io_trywrite(g_self_pipe[1], &byte, 1);
213 free( ptask ); 217
218 /* Indicate whether the worker has to throw away results */
219 return task ? 0 : -1;
220}
221
222int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovec, int *is_partial) {
223 struct ot_task **task;
224 int64 sock = -1;
225
226 *is_partial = 0;
227
228 /* Want exclusive access to tasklist */
229 pthread_mutex_lock(&tasklist_mutex);
230
231 for (task = &tasklist; *task; task = &((*task)->next))
232 if (((*task)->tasktype & TASK_CLASS_MASK) == TASK_DONE) {
233 struct ot_task *ptask = *task;
234 *iovec_entries = ptask->iovec_entries;
235 *iovec = ptask->iovec;
236 sock = ptask->sock;
237
238 if ((*task)->tasktype == TASK_DONE) {
239 *task = ptask->next;
240 free(ptask);
241 } else {
242 ptask->iovec_entries = 0;
243 ptask->iovec = NULL;
244 *is_partial = 1;
245 /* Prevent task from showing up immediately again unless new data was added */
246 (*task)->tasktype = TASK_FULLSCRAPE;
247 }
214 break; 248 break;
215 } 249 }
216 250
217 /* Release lock */ 251 /* Release lock */
218 pthread_mutex_unlock( &tasklist_mutex ); 252 pthread_mutex_unlock(&tasklist_mutex);
219 return sock; 253 return sock;
220} 254}
221 255
222void mutex_init( ) { 256void mutex_init() {
223 int i; 257 int i;
224 pthread_mutex_init(&tasklist_mutex, NULL); 258 pthread_mutex_init(&tasklist_mutex, NULL);
225 pthread_cond_init (&tasklist_being_filled, NULL); 259 pthread_cond_init(&tasklist_being_filled, NULL);
226 for (i=0; i < OT_BUCKET_COUNT; ++i) 260 for (i = 0; i < OT_BUCKET_COUNT; ++i)
227 pthread_mutex_init(bucket_mutex + i, NULL); 261 pthread_mutex_init(bucket_mutex + i, NULL);
228 byte_zero( all_torrents, sizeof( all_torrents ) ); 262 byte_zero(all_torrents, sizeof(all_torrents));
229} 263}
230 264
231void mutex_deinit( ) { 265void mutex_deinit() {
232 int i; 266 int i;
233 for (i=0; i < OT_BUCKET_COUNT; ++i) 267 for (i = 0; i < OT_BUCKET_COUNT; ++i)
234 pthread_mutex_destroy(bucket_mutex + i); 268 pthread_mutex_destroy(bucket_mutex + i);
235 pthread_mutex_destroy(&tasklist_mutex); 269 pthread_mutex_destroy(&tasklist_mutex);
236 pthread_cond_destroy(&tasklist_being_filled); 270 pthread_cond_destroy(&tasklist_being_filled);
237 byte_zero( all_torrents, sizeof( all_torrents ) ); 271 byte_zero(all_torrents, sizeof(all_torrents));
238} 272}
239
240const char *g_version_mutex_c = "$Source$: $Revision$\n";