summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.clang-format246
-rw-r--r--.gitignore2
-rw-r--r--Makefile36
-rw-r--r--man1/opentracker.1142
-rw-r--r--man4/opentracker.conf.486
-rw-r--r--opentracker.c898
-rw-r--r--opentracker.conf.sample52
-rw-r--r--ot_accesslist.c494
-rw-r--r--ot_accesslist.h52
-rw-r--r--ot_clean.c137
-rw-r--r--ot_clean.h10
-rw-r--r--ot_fullscrape.c510
-rw-r--r--ot_fullscrape.h8
-rw-r--r--ot_http.c822
-rw-r--r--ot_http.h18
-rw-r--r--ot_iovec.c98
-rw-r--r--ot_iovec.h11
-rw-r--r--ot_livesync.c205
-rw-r--r--ot_livesync.h16
-rw-r--r--ot_mutex.c372
-rw-r--r--ot_mutex.h113
-rw-r--r--ot_rijndael.c2
-rw-r--r--ot_stats.c992
-rw-r--r--ot_stats.h24
-rw-r--r--ot_sync.c118
-rw-r--r--ot_sync.h8
-rw-r--r--ot_udp.c273
-rw-r--r--ot_udp.h4
-rw-r--r--ot_vector.c242
-rw-r--r--ot_vector.h24
-rw-r--r--proxy.c858
-rw-r--r--scan_urlencoded_query.c99
-rw-r--r--scan_urlencoded_query.h6
-rw-r--r--tests/testsuite2.sh24
-rw-r--r--trackerlogic.c645
-rw-r--r--trackerlogic.h178
36 files changed, 4707 insertions, 3118 deletions
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..cf3c715
--- /dev/null
+++ b/.clang-format
@@ -0,0 +1,246 @@
1---
2Language: Cpp
3# BasedOnStyle: LLVM
4AccessModifierOffset: -2
5AlignAfterOpenBracket: Align
6AlignArrayOfStructures: None
7AlignConsecutiveAssignments:
8 Enabled: true
9 AcrossEmptyLines: true
10 AcrossComments: true
11 AlignCompound: true
12 AlignFunctionPointers: false
13 PadOperators: true
14AlignConsecutiveBitFields:
15 Enabled: false
16 AcrossEmptyLines: true
17 AcrossComments: true
18 AlignCompound: false
19 AlignFunctionPointers: false
20 PadOperators: false
21AlignConsecutiveDeclarations:
22 Enabled: true
23 AcrossEmptyLines: true
24 AcrossComments: true
25 AlignCompound: true
26 AlignFunctionPointers: false
27 PadOperators: true
28AlignConsecutiveMacros:
29 Enabled: true
30 AcrossEmptyLines: true
31 AcrossComments: true
32 AlignCompound: true
33 AlignFunctionPointers: false
34 PadOperators: false
35AlignConsecutiveShortCaseStatements:
36 Enabled: true
37 AcrossEmptyLines: true
38 AcrossComments: true
39 AlignCaseColons: false
40AlignEscapedNewlines: Right
41AlignOperands: Align
42AlignTrailingComments:
43 Kind: Always
44 OverEmptyLines: 0
45AllowAllArgumentsOnNextLine: true
46AllowAllParametersOfDeclarationOnNextLine: true
47AllowBreakBeforeNoexceptSpecifier: Never
48AllowShortBlocksOnASingleLine: Never
49AllowShortCaseLabelsOnASingleLine: false
50AllowShortCompoundRequirementOnASingleLine: true
51AllowShortEnumsOnASingleLine: true
52AllowShortFunctionsOnASingleLine: All
53AllowShortIfStatementsOnASingleLine: Never
54AllowShortLambdasOnASingleLine: All
55AllowShortLoopsOnASingleLine: false
56AlwaysBreakAfterDefinitionReturnType: None
57AlwaysBreakAfterReturnType: None
58AlwaysBreakBeforeMultilineStrings: false
59AlwaysBreakTemplateDeclarations: MultiLine
60AttributeMacros:
61 - __capability
62BinPackArguments: true
63BinPackParameters: true
64BitFieldColonSpacing: Both
65BraceWrapping:
66 AfterCaseLabel: false
67 AfterClass: false
68 AfterControlStatement: Never
69 AfterEnum: false
70 AfterExternBlock: false
71 AfterFunction: false
72 AfterNamespace: false
73 AfterObjCDeclaration: false
74 AfterStruct: false
75 AfterUnion: false
76 BeforeCatch: false
77 BeforeElse: false
78 BeforeLambdaBody: false
79 BeforeWhile: false
80 IndentBraces: false
81 SplitEmptyFunction: true
82 SplitEmptyRecord: true
83 SplitEmptyNamespace: true
84BreakAdjacentStringLiterals: true
85BreakAfterAttributes: Leave
86BreakAfterJavaFieldAnnotations: false
87BreakArrays: true
88BreakBeforeBinaryOperators: None
89BreakBeforeConceptDeclarations: Always
90BreakBeforeBraces: Attach
91BreakBeforeInlineASMColon: OnlyMultiline
92BreakBeforeTernaryOperators: true
93BreakConstructorInitializers: BeforeColon
94BreakInheritanceList: BeforeColon
95BreakStringLiterals: false
96ColumnLimit: 160
97CommentPragmas: '^ IWYU pragma:'
98CompactNamespaces: false
99ConstructorInitializerIndentWidth: 4
100ContinuationIndentWidth: 4
101Cpp11BracedListStyle: true
102DerivePointerAlignment: false
103DisableFormat: false
104EmptyLineAfterAccessModifier: Never
105EmptyLineBeforeAccessModifier: LogicalBlock
106ExperimentalAutoDetectBinPacking: false
107FixNamespaceComments: true
108ForEachMacros:
109 - foreach
110 - Q_FOREACH
111 - BOOST_FOREACH
112IfMacros:
113 - KJ_IF_MAYBE
114IncludeBlocks: Preserve
115IncludeCategories:
116 - Regex: '^"(llvm|llvm-c|clang|clang-c)/'
117 Priority: 2
118 SortPriority: 0
119 CaseSensitive: false
120 - Regex: '^(<|"(gtest|gmock|isl|json)/)'
121 Priority: 3
122 SortPriority: 0
123 CaseSensitive: false
124 - Regex: '.*'
125 Priority: 1
126 SortPriority: 0
127 CaseSensitive: false
128IncludeIsMainRegex: '(Test)?$'
129IncludeIsMainSourceRegex: ''
130IndentAccessModifiers: false
131IndentCaseBlocks: false
132IndentCaseLabels: false
133IndentExternBlock: AfterExternBlock
134IndentGotoLabels: true
135IndentPPDirectives: None
136IndentRequiresClause: true
137IndentWidth: 2
138IndentWrappedFunctionNames: false
139InsertBraces: false
140InsertNewlineAtEOF: false
141InsertTrailingCommas: None
142IntegerLiteralSeparator:
143 Binary: 0
144 BinaryMinDigits: 0
145 Decimal: 0
146 DecimalMinDigits: 0
147 Hex: 0
148 HexMinDigits: 0
149JavaScriptQuotes: Leave
150JavaScriptWrapImports: true
151KeepEmptyLinesAtTheStartOfBlocks: true
152KeepEmptyLinesAtEOF: false
153LambdaBodyIndentation: Signature
154LineEnding: DeriveLF
155MacroBlockBegin: ''
156MacroBlockEnd: ''
157MaxEmptyLinesToKeep: 1
158NamespaceIndentation: None
159ObjCBinPackProtocolList: Auto
160ObjCBlockIndentWidth: 2
161ObjCBreakBeforeNestedBlockParam: true
162ObjCSpaceAfterProperty: false
163ObjCSpaceBeforeProtocolList: true
164PackConstructorInitializers: BinPack
165PenaltyBreakAssignment: 2
166PenaltyBreakBeforeFirstCallParameter: 19
167PenaltyBreakComment: 300
168PenaltyBreakFirstLessLess: 120
169PenaltyBreakOpenParenthesis: 0
170PenaltyBreakScopeResolution: 500
171PenaltyBreakString: 1000
172PenaltyBreakTemplateDeclaration: 10
173PenaltyExcessCharacter: 1000000
174PenaltyIndentedWhitespace: 0
175PenaltyReturnTypeOnItsOwnLine: 60
176PointerAlignment: Right
177PPIndentWidth: -1
178QualifierAlignment: Leave
179ReferenceAlignment: Pointer
180ReflowComments: true
181RemoveBracesLLVM: false
182RemoveParentheses: Leave
183RemoveSemicolon: false
184RequiresClausePosition: OwnLine
185RequiresExpressionIndentation: OuterScope
186SeparateDefinitionBlocks: Leave
187ShortNamespaceLines: 1
188SkipMacroDefinitionBody: false
189SortIncludes: CaseSensitive
190SortJavaStaticImport: Before
191SortUsingDeclarations: LexicographicNumeric
192SpaceAfterCStyleCast: false
193SpaceAfterLogicalNot: false
194SpaceAfterTemplateKeyword: true
195SpaceAroundPointerQualifiers: Default
196SpaceBeforeAssignmentOperators: true
197SpaceBeforeCaseColon: false
198SpaceBeforeCpp11BracedList: false
199SpaceBeforeCtorInitializerColon: true
200SpaceBeforeInheritanceColon: true
201SpaceBeforeJsonColon: false
202SpaceBeforeParens: ControlStatements
203SpaceBeforeParensOptions:
204 AfterControlStatements: true
205 AfterForeachMacros: true
206 AfterFunctionDefinitionName: false
207 AfterFunctionDeclarationName: false
208 AfterIfMacros: true
209 AfterOverloadedOperator: false
210 AfterPlacementOperator: true
211 AfterRequiresInClause: false
212 AfterRequiresInExpression: false
213 BeforeNonEmptyParentheses: false
214SpaceBeforeRangeBasedForLoopColon: true
215SpaceBeforeSquareBrackets: false
216SpaceInEmptyBlock: false
217SpacesBeforeTrailingComments: 1
218SpacesInAngles: Never
219SpacesInContainerLiterals: true
220SpacesInLineCommentPrefix:
221 Minimum: 1
222 Maximum: -1
223SpacesInParens: Never
224SpacesInParensOptions:
225 InCStyleCasts: false
226 InConditionalStatements: false
227 InEmptyParentheses: false
228 Other: false
229SpacesInSquareBrackets: false
230Standard: Latest
231StatementAttributeLikeMacros:
232 - Q_EMIT
233StatementMacros:
234 - Q_UNUSED
235 - QT_REQUIRE_VERSION
236TabWidth: 8
237UseTab: Never
238VerilogBreakBetweenInstancePorts: true
239WhitespaceSensitiveMacros:
240 - BOOST_PP_STRINGIZE
241 - CF_SWIFT_NAME
242 - NS_SWIFT_NAME
243 - PP_STRINGIZE
244 - STRINGIZE
245...
246
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..874c63c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
1*.o
2
diff --git a/Makefile b/Makefile
index da2c8f1..e5ca6e4 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,5 @@
1# $Id$ 1# $Id$
2 2
3CC?=gcc
4
5# Linux flavour 3# Linux flavour
6# PREFIX?=/opt/diet 4# PREFIX?=/opt/diet
7# LIBOWFAT_HEADERS=$(PREFIX)/include 5# LIBOWFAT_HEADERS=$(PREFIX)/include
@@ -18,16 +16,22 @@ LIBOWFAT_HEADERS=$(PREFIX)/libowfat
18LIBOWFAT_LIBRARY=$(PREFIX)/libowfat 16LIBOWFAT_LIBRARY=$(PREFIX)/libowfat
19 17
20BINDIR?=$(PREFIX)/bin 18BINDIR?=$(PREFIX)/bin
19STRIP?=strip
21 20
22#FEATURES+=-DWANT_V6 21#FEATURES+=-DWANT_V4_ONLY
23
24#FEATURES+=-DWANT_ACCESSLIST_BLACK 22#FEATURES+=-DWANT_ACCESSLIST_BLACK
25#FEATURES+=-DWANT_ACCESSLIST_WHITE 23#FEATURES+=-DWANT_ACCESSLIST_WHITE
24#FEATURES+=-DWANT_DYNAMIC_ACCESSLIST
26 25
27#FEATURES+=-DWANT_SYNC_LIVE 26#FEATURES+=-DWANT_SYNC_LIVE
28#FEATURES+=-DWANT_IP_FROM_QUERY_STRING 27#FEATURES+=-DWANT_IP_FROM_QUERY_STRING
29#FEATURES+=-DWANT_COMPRESSION_GZIP 28FEATURES+=-DWANT_COMPRESSION_GZIP
30#FEATURES+=-DWANT_COMPRESSION_GZIP_ALWAYS 29FEATURES+=-DWANT_COMPRESSION_GZIP_ALWAYS
30
31#FEATURES+=-DWANT_COMPRESSION_ZSTD
32#FEATURES+=-DWANT_COMPRESSION_ZSTD_ALWAYS
33#LDFLAGS+=-lzstd
34
31#FEATURES+=-DWANT_LOG_NETWORKS 35#FEATURES+=-DWANT_LOG_NETWORKS
32#FEATURES+=-DWANT_RESTRICT_STATS 36#FEATURES+=-DWANT_RESTRICT_STATS
33#FEATURES+=-DWANT_IP_FROM_PROXY 37#FEATURES+=-DWANT_IP_FROM_PROXY
@@ -39,13 +43,25 @@ BINDIR?=$(PREFIX)/bin
39#FEATURES+=-DWANT_DEV_RANDOM 43#FEATURES+=-DWANT_DEV_RANDOM
40FEATURES+=-DWANT_FULLSCRAPE 44FEATURES+=-DWANT_FULLSCRAPE
41 45
46# You need libowfat version 0.34 to allow for automatic release of chunks during
47# full scrape transfer. If you rely on an older version, enable this flag
48#FEATURES+=-DWANT_NO_AUTO_FREE
49
50# Is enabled on BSD systems by default in trackerlogic.h
51# on Linux systems you will need -lbsd
52#FEATURES+=-DWANT_ARC4RANDOM
53
42#FEATURES+=-D_DEBUG_HTTPERROR 54#FEATURES+=-D_DEBUG_HTTPERROR
55#FEATURES+=-D_DEBUG_RANDOMTORRENTS
56
57GIT_VERSION=$(shell sh -c 'command -v git >/dev/null && test -d .git && git rev-parse HEAD || echo _git_or_commit_not_found_')
43 58
44OPTS_debug=-D_DEBUG -g -ggdb # -pg -fprofile-arcs -ftest-coverage 59OPTS_debug=-D_DEBUG -g -ggdb # -pg -fprofile-arcs -ftest-coverage
45OPTS_production=-O3 60OPTS_production=-O3
46 61
47CFLAGS+=-I$(LIBOWFAT_HEADERS) -Wall -pipe -Wextra #-ansi -pedantic 62CFLAGS+=-I$(LIBOWFAT_HEADERS) -DGIT_VERSION=$(GIT_VERSION) -Wall -pipe -pthread -Wextra #-ansi -pedantic
48LDFLAGS+=-L$(LIBOWFAT_LIBRARY) -lowfat -pthread -lpthread -lz 63LDFLAGS+=-L$(LIBOWFAT_LIBRARY) -lowfat -pthread -lz
64#LDFLAGS+=-lbsd
49 65
50BINARY =opentracker 66BINARY =opentracker
51HEADERS=trackerlogic.h scan_urlencoded_query.h ot_mutex.h ot_stats.h ot_vector.h ot_clean.h ot_udp.h ot_iovec.h ot_fullscrape.h ot_accesslist.h ot_http.h ot_livesync.h ot_rijndael.h 67HEADERS=trackerlogic.h scan_urlencoded_query.h ot_mutex.h ot_stats.h ot_vector.h ot_clean.h ot_udp.h ot_iovec.h ot_fullscrape.h ot_accesslist.h ot_http.h ot_livesync.h ot_rijndael.h
@@ -66,7 +82,7 @@ CFLAGS_debug = $(CFLAGS) $(OPTS_debug) $(FEATURES)
66 82
67$(BINARY): $(OBJECTS) $(HEADERS) 83$(BINARY): $(OBJECTS) $(HEADERS)
68 $(CC) -o $@ $(OBJECTS) $(LDFLAGS) 84 $(CC) -o $@ $(OBJECTS) $(LDFLAGS)
69 strip $@ 85 $(STRIP) $@
70$(BINARY).debug: $(OBJECTS_debug) $(HEADERS) 86$(BINARY).debug: $(OBJECTS_debug) $(HEADERS)
71 $(CC) -o $@ $(OBJECTS_debug) $(LDFLAGS) 87 $(CC) -o $@ $(OBJECTS_debug) $(LDFLAGS)
72proxy: $(OBJECTS_proxy) $(HEADERS) 88proxy: $(OBJECTS_proxy) $(HEADERS)
@@ -84,4 +100,4 @@ clean:
84 rm -rf opentracker opentracker.debug *.o *~ 100 rm -rf opentracker opentracker.debug *.o *~
85 101
86install: 102install:
87 install -m 755 opentracker $(BINDIR) 103 install -m 755 opentracker $(DESTDIR)$(BINDIR)
diff --git a/man1/opentracker.1 b/man1/opentracker.1
new file mode 100644
index 0000000..85ded7b
--- /dev/null
+++ b/man1/opentracker.1
@@ -0,0 +1,142 @@
1.Dd April 15, 2024
2.Dt opentracker 1
3.Os Unix
4.Sh opentracker
5.Nm opentracker
6.Nd a free and open bittorrent tracker
7.Sh SYNOPSIS
8.Nm
9.Op Fl f Ar config
10.Op Fl i Ar ip-select
11.Op Fl p Ar port-bind-tcp
12.Op Fl P Ar port-bind-udp
13.Op Fl A Ar blessed-ip
14.Op Fl r Ar redirect-url
15.Op Fl d Ar chdir
16.Op Fl u Ar user
17.Op Fl w Ar accesslist | Fl b Ar accesslist
18.Sh DESCRIPTION
19.Nm
20is a bittorrent tracker that implements announce and scrape actions over the
21UDP and the plain http protocol, aiming for minimal resource usage.
22.Pp
23
24When invoked without parameters, it binds to TCP and UDP port 6969 on all
25interfaces. The recommended way to configure opentracker is by providing a
26config file using the
27.Op Fl f Ar config
28option. See
29.Xr opentracker.conf 4
30for details.
31.Pp
32
33.Sh OPTIONS
34The following options are available:
35
36.Bl -tag -width -indent=8
37.It Fl f Ar config
38Parse a config file with a list of options. Consecutive command options
39will override options from the config file. See
40.Xr opentracker.conf 4
41for details.
42
43.It Fl i Ar ip-select
44Select an ip address that will be used with the next
45.Op Fl p
46or
47.Op Fl P
48command to actually bind to this address. Setting this option without any bind
49options in the config file or
50.Op Fl p
51or
52.Op Fl P
53commands will limit opentracker to only bind to this address.
54.It Fl p Ar port-bind-tcp
55Bind to the TCP port on the last preceding ip address set with the
56.Op Fl i Ar ip-select
57option or to all available addresses if none has been set. Can be given multiple
58times.
59.It Fl P Ar port-bind-udp
60Bind to the UDP port on the last preceding ip address set with the
61.Op Fl i Ar ip-select
62option or to all available addresses if none has been set. Can be given multiple
63times.
64.It Fl A Ar blessed-ip
65Set an ip address in IPv4 or IPv6 or a net in CIDR notation to bless the network
66for access to restricted resources.
67.It Fl r Ar redirect-url
68Set the URL that
69.Nm
70will redirect users to when the / address is requested via HTTP.
71.It Fl d Ar chdir
72Sets the directory
73.Nm
74will
75.Xr chroot 2
76to if ran as root or
77.Xr chdir 2
78to if ran as unprivileged user. Note that any accesslist files need to be
79relative to and within that directory.
80.It Fl u Ar user
81User to run
82.Nm
83under after all operations that need privileges have finished.
84.It Fl w Ar accesslist | Fl b Ar accesslist
85If
86.Nm
87has been compiled with the
88.B WANT_ACCESSLIST_BLACK
89or
90.B WANT_ACCESSLIST_WHITE
91options, this option sets the location of the accesslist.
92.El
93
94.Sh EXAMPLES
95
96Start
97.Nm
98bound on UDP and TCP ports 6969 on IPv6 localhost.
99
100.Dl # ./opentracker -i ::1 -p 6969 -P 6969
101
102.Pp
103Start
104.Nm
105bound on TCP port 6868 and UDP port 6969 on IPv4 localhost and allow
106privileged access from the network 192.168/16 while redirecting
107HTTP clients accessing the root directory, which is not covered by the
108bittorrent tracker protocol, to https://my-trackersite.com/.
109
110.Dl # ./opentracker -i 192.168.0.4 -p 6868 -P 6969 -A 192.168/16 -r https://my-trackersite.com/
111
112The announce URLs are http://192.168.0.4:6868/announce and
113udp://192.168.0.4:6969/announce respectively.
114
115.Sh FILES
116.Bl -tag -width indent
117.It Pa opentracker.conf
118The
119.Nm
120config file.
121.El
122.Sh SEE ALSO
123.Xr opentracker.conf 4
124.Pp
125opentracker documentation
126.Lk https://erdgeist.org/arts/software/opentracker
127.Pp
128Bittorrent tracker protocol
129.Lk http://www.bittorrent.org/beps/bep_0015.html
130.\" NOTE(review): stray empty .Sh neutralized; it produced a blank section header
131.Sh AUTHOR
132.An Dirk Engling
133.Aq Mt erdgeist@erdgeist.org .
134.Sh LICENSE
135This software is released under the Beerware License:
136
137Permission is hereby granted, free of charge, to any person obtaining a copy of this software
138and associated documentation files (the "Software"), to deal in the Software with the following
139terms and conditions:
140
141If you meet the author(s) someday, and you think this software is worth it, you can buy them
142a beer in return.
diff --git a/man4/opentracker.conf.4 b/man4/opentracker.conf.4
new file mode 100644
index 0000000..b4f5f51
--- /dev/null
+++ b/man4/opentracker.conf.4
@@ -0,0 +1,86 @@
1.Dd 2024-04-18
2.Dt opentracker.conf 4
3.Os Unix
4.Sh NAME
5.Nm opentracker.conf
6.Nd configuration file for opentracker
7.Sh SYNOPSIS
8.Nm
9.Sh DESCRIPTION
10The
11.Nm
12configuration file specifies various options for configuring the behavior of the opentracker program.
13.Pp
14Lines starting with '#' are comments and are ignored. Options are specified as 'keyword value' pairs.
15.Pp
16The following options are available:
17
18.Bl -tag -width ".It access.proxy" -compact
19.It listen.tcp_udp Ar address
20Specifies an address opentracker will listen on for both TCP and UDP connections. If none are specified, opentracker listens on 0.0.0.0:6969 by default. Can be added more than once.
21
22.It listen.tcp Ar address
23Specifies the address opentracker will listen on for TCP connections. Can be added more than once.
24
25.It listen.udp Ar address
26Specifies the address opentracker will listen on for UDP connections. Can be added more than once.
27
28.It listen.udp.workers Ar threads
29Specifies how many threads will be spawned to handle UDP connections. Defaults to 4.
30
31.It access.whitelist Ar path/to/whitelist
32Specifies the path to the whitelist file containing all torrent hashes that opentracker will serve. Use this option if opentracker runs in a non-open mode.
33
34.It access.blacklist Ar path/to/blacklist
35Specifies the path to the blacklist file containing all torrent hashes that opentracker will not serve. Use this option if opentracker was compiled to allow blacklisting.
36
37.It access.fifo_add Ar path/to/adder.fifo
38Specifies the path to the FIFO (named pipe) used for dynamic changesets to accesslists. Info hashes written to this FIFO will be added to the main accesslist file.
39
40.It access.fifo_delete Ar path/to/deleter.fifo
41Specifies the path to the FIFO (named pipe) used for dynamic changesets to accesslists. Info hashes written to this FIFO will be removed from the main accesslist file.
42
43.It access.stats Ar ip_address_or_network
44Specifies the IP address or network in CIDR notation allowed to fetch stats from opentracker.
45
46.It access.stats_path Ar path
47Specifies the path to the stats location. You can configure opentracker to appear anywhere on your tracker. Defaults to /stats.
48
49.It access.proxy Ar ip_address_or_network
50Specifies the IP address or network of the reverse proxies. Opentracker will take the X-Forwarded-For address instead of the source IP address. Can be added more than once.
51
52.It livesync.cluster.listen Ar ip_address:port
53Specifies the IP address and port opentracker will listen on for incoming live sync packets to keep a cluster of opentrackers synchronized.
54
55.It livesync.cluster.node_ip Ar ip_address
56Specifies one trusted IP address for sync between trackers running in a cluster. Can be added more than once.
57
58.It batchsync.cluster.admin_ip Ar ip_address
59Specifies the admin IP address for old-style (HTTP-based) asynchronous tracker syncing.
60
61.It tracker.rootdir Ar path
62Specifies the directory opentracker will chroot/chdir to. All black/white list files must be located in this directory.
63
64.It tracker.user Ar username
65Specifies the user opentracker will setuid to after binding to potentially privileged ports.
66
67.It tracker.redirect_url Ar URL
68Specifies the URL opentracker will redirect to in response to a "GET / HTTP" request.
69
70.Sh EXAMPLES
71To specify the address opentracker will listen on for both TCP and UDP connections:
72.Dl listen.tcp_udp 0.0.0.0:6969
73.Pp
74To specify the address opentracker will listen on for TCP connections:
75.Dl listen.tcp 0.0.0.0
76.Pp
77To specify the address opentracker will listen on for UDP connections:
78.Dl listen.udp 0.0.0.0:6969
79
80.Sh SEE ALSO
81.Xr opentracker 1
82
83.Sh AUTHOR
84.An Dirk Engling
85.Aq Mt erdgeist@erdgeist.org
86
diff --git a/opentracker.c b/opentracker.c
index 09010c7..14e9989 100644
--- a/opentracker.c
+++ b/opentracker.c
@@ -5,59 +5,59 @@
5 $Id$ */ 5 $Id$ */
6 6
7/* System */ 7/* System */
8#include <stdlib.h>
9#include <string.h>
10#include <arpa/inet.h> 8#include <arpa/inet.h>
11#include <sys/socket.h> 9#include <ctype.h>
12#include <unistd.h>
13#include <errno.h> 10#include <errno.h>
11#include <pthread.h>
12#include <pwd.h>
14#include <signal.h> 13#include <signal.h>
15#include <stdio.h> 14#include <stdio.h>
16#include <pwd.h> 15#include <stdlib.h>
17#include <ctype.h> 16#include <string.h>
18#include <pthread.h> 17#include <sys/socket.h>
18#include <unistd.h>
19#ifdef WANT_SYSLOGS 19#ifdef WANT_SYSLOGS
20#include <syslog.h> 20#include <syslog.h>
21#endif 21#endif
22 22
23/* Libowfat */ 23/* Libowfat */
24#include "socket.h" 24#include "byte.h"
25#include "io.h" 25#include "io.h"
26#include "iob.h" 26#include "iob.h"
27#include "byte.h"
28#include "scan.h"
29#include "ip6.h" 27#include "ip6.h"
28#include "scan.h"
29#include "socket.h"
30 30
31/* Opentracker */ 31/* Opentracker */
32#include "trackerlogic.h"
33#include "ot_mutex.h"
34#include "ot_http.h"
35#include "ot_udp.h"
36#include "ot_accesslist.h" 32#include "ot_accesslist.h"
37#include "ot_stats.h" 33#include "ot_http.h"
38#include "ot_livesync.h" 34#include "ot_livesync.h"
35#include "ot_mutex.h"
36#include "ot_stats.h"
37#include "ot_udp.h"
38#include "trackerlogic.h"
39 39
40/* Globals */ 40/* Globals */
41time_t g_now_seconds; 41time_t g_now_seconds;
42char * g_redirecturl; 42char *g_redirecturl;
43uint32_t g_tracker_id; 43uint32_t g_tracker_id;
44volatile int g_opentracker_running = 1; 44volatile int g_opentracker_running = 1;
45int g_self_pipe[2]; 45int g_self_pipe[2];
46 46
47static char * g_serverdir; 47static char *g_serverdir;
48static char * g_serveruser; 48static char *g_serveruser;
49static unsigned int g_udp_workers; 49static unsigned int g_udp_workers;
50 50
51static void panic( const char *routing ) __attribute__ ((noreturn)); 51static void panic(const char *routine) __attribute__((noreturn));
52static void panic( const char *routine ) { 52static void panic(const char *routine) {
53 fprintf( stderr, "%s: %s\n", routine, strerror(errno) ); 53 fprintf(stderr, "%s: %s\n", routine, strerror(errno));
54 exit( 111 ); 54 exit(111);
55} 55}
56 56
57static void signal_handler( int s ) { 57static void signal_handler(int s) {
58 if( s == SIGINT ) { 58 if (s == SIGINT) {
59 /* Any new interrupt signal quits the application */ 59 /* Any new interrupt signal quits the application */
60 signal( SIGINT, SIG_DFL); 60 signal(SIGINT, SIG_DFL);
61 61
62 /* Tell all other threads to not acquire any new lock on a bucket 62 /* Tell all other threads to not acquire any new lock on a bucket
63 but cancel their operations and return */ 63 but cancel their operations and return */
@@ -69,483 +69,566 @@ static void signal_handler( int s ) {
69 closelog(); 69 closelog();
70#endif 70#endif
71 71
72 exit( 0 ); 72 exit(0);
73 } else if( s == SIGALRM ) {
74 /* Maintain our copy of the clock. time() on BSDs is very expensive. */
75 g_now_seconds = time(NULL);
76 alarm(5);
77 } 73 }
78} 74}
79 75
80static void defaul_signal_handlers( void ) { 76static void defaul_signal_handlers(void) {
81 sigset_t signal_mask; 77 sigset_t signal_mask;
82 sigemptyset(&signal_mask); 78 sigemptyset(&signal_mask);
83 sigaddset (&signal_mask, SIGPIPE); 79 sigaddset(&signal_mask, SIGPIPE);
84 sigaddset (&signal_mask, SIGHUP); 80 sigaddset(&signal_mask, SIGHUP);
85 sigaddset (&signal_mask, SIGINT); 81 sigaddset(&signal_mask, SIGINT);
86 sigaddset (&signal_mask, SIGALRM); 82 sigaddset(&signal_mask, SIGALRM);
87 pthread_sigmask (SIG_BLOCK, &signal_mask, NULL); 83 pthread_sigmask(SIG_BLOCK, &signal_mask, NULL);
88} 84}
89 85
90static void install_signal_handlers( void ) { 86static void install_signal_handlers(void) {
91 struct sigaction sa; 87 struct sigaction sa;
92 sigset_t signal_mask; 88 sigset_t signal_mask;
93 sigemptyset(&signal_mask); 89 sigemptyset(&signal_mask);
94 90
95 sa.sa_handler = signal_handler; 91 sa.sa_handler = signal_handler;
96 sigemptyset(&sa.sa_mask); 92 sigemptyset(&sa.sa_mask);
97 sa.sa_flags = SA_RESTART; 93 sa.sa_flags = SA_RESTART;
98 if ((sigaction(SIGINT, &sa, NULL) == -1) || (sigaction(SIGALRM, &sa, NULL) == -1) ) 94 if ((sigaction(SIGINT, &sa, NULL) == -1) || (sigaction(SIGALRM, &sa, NULL) == -1))
99 panic( "install_signal_handlers" ); 95 panic("install_signal_handlers");
100 96
101 sigaddset (&signal_mask, SIGINT); 97 sigaddset(&signal_mask, SIGINT);
102 sigaddset (&signal_mask, SIGALRM); 98 pthread_sigmask(SIG_UNBLOCK, &signal_mask, NULL);
103 pthread_sigmask (SIG_UNBLOCK, &signal_mask, NULL);
104} 99}
105 100
106static void usage( char *name ) { 101static void usage(char *name) {
107 fprintf( stderr, "Usage: %s [-i ip] [-p port] [-P port] [-r redirect] [-d dir] [-u user] [-A ip] [-f config] [-s livesyncport]" 102 fprintf(stderr,
103 "Usage: %s [-i ip] [-p port] [-P port] [-r redirect] [-d dir] [-u user] [-A ip[/bits]] [-f config] [-s livesyncport]"
108#ifdef WANT_ACCESSLIST_BLACK 104#ifdef WANT_ACCESSLIST_BLACK
109 " [-b blacklistfile]" 105 " [-b blacklistfile]"
110#elif defined ( WANT_ACCESSLIST_WHITE ) 106#elif defined(WANT_ACCESSLIST_WHITE)
111 " [-w whitelistfile]" 107 " [-w whitelistfile]"
112#endif 108#endif
113 "\n", name ); 109 "\n",
110 name);
114} 111}
115 112
116#define HELPLINE(opt,desc) fprintf(stderr, "\t%-10s%s\n",opt,desc) 113#define HELPLINE(opt, desc) fprintf(stderr, "\t%-10s%s\n", opt, desc)
117static void help( char *name ) { 114static void help(char *name) {
118 usage( name ); 115 usage(name);
119 116
120 HELPLINE("-f config","include and execute the config file"); 117 HELPLINE("-f config", "include and execute the config file");
121 HELPLINE("-i ip","specify ip to bind to (default: *, you may specify more than one)"); 118 HELPLINE("-i ip", "specify ip to bind to with next -[pP] (default: any, overrides preceeding ones)");
122 HELPLINE("-p port","specify tcp port to bind to (default: 6969, you may specify more than one)"); 119 HELPLINE("-p port", "do bind to tcp port (default: 6969, you may specify more than one)");
123 HELPLINE("-P port","specify udp port to bind to (default: 6969, you may specify more than one)"); 120 HELPLINE("-P port", "do bind to udp port (default: 6969, you may specify more than one)");
124 HELPLINE("-r redirecturl","specify url where / should be redirected to (default none)"); 121 HELPLINE("-r redirecturl", "specify url where / should be redirected to (default none)");
125 HELPLINE("-d dir","specify directory to try to chroot to (default: \".\")"); 122 HELPLINE("-d dir", "specify directory to try to chroot to (default: \".\")");
126 HELPLINE("-u user","specify user under whose priviliges opentracker should run (default: \"nobody\")"); 123 HELPLINE("-u user", "specify user under whose privileges opentracker should run (default: \"nobody\")");
127 HELPLINE("-A ip","bless an ip address as admin address (e.g. to allow syncs from this address)"); 124 HELPLINE("-A ip[/bits]", "bless an ip address or net as admin address (e.g. to allow syncs from this address)");
128#ifdef WANT_ACCESSLIST_BLACK 125#ifdef WANT_ACCESSLIST_BLACK
129 HELPLINE("-b file","specify blacklist file."); 126 HELPLINE("-b file", "specify blacklist file.");
130#elif defined( WANT_ACCESSLIST_WHITE ) 127#elif defined(WANT_ACCESSLIST_WHITE)
131 HELPLINE("-w file","specify whitelist file."); 128 HELPLINE("-w file", "specify whitelist file.");
132#endif 129#endif
133 130
134 fprintf( stderr, "\nExample: ./opentracker -i 127.0.0.1 -p 6969 -P 6969 -f ./opentracker.conf -i 10.1.1.23 -p 2710 -p 80\n" ); 131 fprintf(stderr, "\nExample: ./opentracker -i 127.0.0.1 -p 6969 -P 6969 -f ./opentracker.conf -i 10.1.1.23 -p 2710 -p 80\n");
132 fprintf(stderr, " Here -i 127.0.0.1 selects the ip address for the next -p 6969 and -P 6969.\n");
133 fprintf(stderr, " If no port is bound from config file or command line, the last address given\n");
134 fprintf(stderr, " (or ::1 if none is set) will be used on port 6969.\n");
135} 135}
136#undef HELPLINE 136#undef HELPLINE
137 137
138static size_t header_complete( char * request, ssize_t byte_count ) { 138static ssize_t header_complete(char *request, ssize_t byte_count) {
139 int i = 0, state = 0; 139 ssize_t i = 0, state = 0;
140 140
141 for( i=1; i < byte_count; i+=2 ) 141 for (i = 1; i < byte_count; i += 2)
142 if( request[i] <= 13 ) { 142 if (request[i] <= 13) {
143 i--; 143 i--;
144 for( state = 0 ; i < byte_count; ++i ) { 144 for (state = 0; i < byte_count; ++i) {
145 char c = request[i]; 145 char c = request[i];
146 if( c == '\r' || c == '\n' ) 146 if (c == '\r' || c == '\n')
147 state = ( state >> 2 ) | ( ( c << 6 ) & 0xc0 ); 147 state = (state >> 2) | ((c << 6) & 0xc0);
148 else 148 else
149 break; 149 break;
150 if( state >= 0xa0 || state == 0x99 ) return i + 1; 150 if (state >= 0xa0 || state == 0x99)
151 return i + 1;
151 } 152 }
152 } 153 }
153 return 0; 154 return 0;
154} 155}
155 156
156static void handle_dead( const int64 sock ) { 157static void handle_dead(const int64 sock) {
157 struct http_data* cookie=io_getcookie( sock ); 158 struct http_data *cookie = io_getcookie(sock);
158 if( cookie ) { 159 if (cookie) {
159 iob_reset( &cookie->batch ); 160 size_t i;
160 array_reset( &cookie->request ); 161 for (i = 0; i < cookie->batches; ++i)
161 if( cookie->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK ) 162 iob_reset(cookie->batch + i);
162 mutex_workqueue_canceltask( sock ); 163 free(cookie->batch);
163 free( cookie ); 164 array_reset(&cookie->request);
165 if (cookie->flag & (STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER))
166 mutex_workqueue_canceltask(sock);
167 free(cookie);
164 } 168 }
165 io_close( sock ); 169 io_close(sock);
166} 170}
167 171
168static void handle_read( const int64 sock, struct ot_workstruct *ws ) { 172static void handle_read(const int64 sock, struct ot_workstruct *ws) {
169 struct http_data* cookie = io_getcookie( sock ); 173 struct http_data *cookie = io_getcookie(sock);
170 ssize_t byte_count; 174 ssize_t byte_count = io_tryread(sock, ws->inbuf, G_INBUF_SIZE);
171 175
172 if( ( byte_count = io_tryread( sock, ws->inbuf, G_INBUF_SIZE ) ) <= 0 ) { 176 if (byte_count == 0 || byte_count == -3) {
173 handle_dead( sock ); 177 handle_dead(sock);
174 return; 178 return;
175 } 179 }
176 180
181 if (byte_count == -1)
182 return;
183
177 /* If we get the whole request in one packet, handle it without copying */ 184 /* If we get the whole request in one packet, handle it without copying */
178 if( !array_start( &cookie->request ) ) { 185 if (!array_start(&cookie->request)) {
179 if( ( ws->header_size = header_complete( ws->inbuf, byte_count ) ) ) { 186 if ((ws->header_size = header_complete(ws->inbuf, byte_count))) {
180 ws->request = ws->inbuf; 187 ws->request = ws->inbuf;
181 ws->request_size = byte_count; 188 ws->request_size = byte_count;
182 http_handle_request( sock, ws ); 189 http_handle_request(sock, ws);
183 } else 190 } else
184 array_catb( &cookie->request, ws->inbuf, byte_count ); 191 array_catb(&cookie->request, ws->inbuf, (size_t)byte_count);
185 return; 192 return;
186 } 193 }
187 194
188 array_catb( &cookie->request, ws->inbuf, byte_count ); 195 array_catb(&cookie->request, ws->inbuf, byte_count);
189 if( array_failed( &cookie->request ) || array_bytes( &cookie->request ) > 8192 ) { 196 if (array_failed(&cookie->request) || array_bytes(&cookie->request) > 8192) {
190 http_issue_error( sock, ws, CODE_HTTPERROR_500 ); 197 http_issue_error(sock, ws, CODE_HTTPERROR_500);
191 return; 198 return;
192 } 199 }
193 200
194 while( ( ws->header_size = header_complete( array_start( &cookie->request ), array_bytes( &cookie->request ) ) ) ) { 201 while ((ws->header_size = header_complete(array_start(&cookie->request), array_bytes(&cookie->request)))) {
195 ws->request = array_start( &cookie->request ); 202 ws->request = array_start(&cookie->request);
196 ws->request_size = array_bytes( &cookie->request ); 203 ws->request_size = array_bytes(&cookie->request);
197 http_handle_request( sock, ws ); 204 http_handle_request(sock, ws);
198#ifdef WANT_KEEPALIVE 205#ifdef WANT_KEEPALIVE
199 if( !ws->keep_alive ) 206 if (!ws->keep_alive)
200#endif 207#endif
201 return; 208 return;
202 } 209 }
203} 210}
204 211
205static void handle_write( const int64 sock ) { 212static void handle_write(const int64 sock) {
206 struct http_data* cookie=io_getcookie( sock ); 213 struct http_data *cookie = io_getcookie(sock);
207 if( !cookie || ( iob_send( sock, &cookie->batch ) <= 0 ) ) 214 size_t i;
208 handle_dead( sock ); 215 int chunked = 0;
216
217 /* Look for the first io_batch still containing bytes to write */
218 if (cookie) {
219 if (cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER)
220 chunked = 1;
221
222 for (i = 0; i < cookie->batches; ++i) {
223 if (cookie->batch[i].bytesleft) {
224 int64 res = iob_send(sock, cookie->batch + i);
225
226 if (res == -3) {
227 handle_dead(sock);
228 return;
229 }
230
231 if (!cookie->batch[i].bytesleft)
232 continue;
233
234 if (res == -1 || res > 0 || i < cookie->batches - 1)
235 return;
236 }
237 }
238 }
239
240 /* In a chunked transfer after all batches accumulated have been sent, wait for the next one */
241 if (chunked)
242 io_dontwantwrite(sock);
243 else
244 handle_dead(sock);
209} 245}
210 246
211static void handle_accept( const int64 serversocket ) { 247static void handle_accept(const int64 serversocket) {
212 struct http_data *cookie; 248 struct http_data *cookie;
213 int64 sock; 249 int64 sock;
214 ot_ip6 ip; 250 ot_ip6 ip;
215 uint16 port; 251 uint16 port;
216 tai6464 t; 252 tai6464 t;
217 253
218 while( ( sock = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) { 254 while ((sock = socket_accept6(serversocket, ip, &port, NULL)) != -1) {
219 255
220 /* Put fd into a non-blocking mode */ 256 /* Put fd into a non-blocking mode */
221 io_nonblock( sock ); 257 io_nonblock(sock);
222 258
223 if( !io_fd( sock ) || 259 if (!io_fd(sock) || !(cookie = (struct http_data *)malloc(sizeof(struct http_data)))) {
224 !( cookie = (struct http_data*)malloc( sizeof(struct http_data) ) ) ) { 260 io_close(sock);
225 io_close( sock );
226 continue; 261 continue;
227 } 262 }
228 memset(cookie, 0, sizeof( struct http_data ) ); 263 memset(cookie, 0, sizeof(struct http_data));
229 memcpy(cookie->ip,ip,sizeof(ot_ip6)); 264 memcpy(cookie->ip, ip, sizeof(ot_ip6));
230 265
231 io_setcookie( sock, cookie ); 266 io_setcookie(sock, cookie);
232 io_wantread( sock ); 267 io_wantread(sock);
233 268
234 stats_issue_event( EVENT_ACCEPT, FLAG_TCP, (uintptr_t)ip); 269 stats_issue_event(EVENT_ACCEPT, FLAG_TCP, (uintptr_t)ip);
235 270
236 /* That breaks taia encapsulation. But there is no way to take system 271 /* That breaks taia encapsulation. But there is no way to take system
237 time this often in FreeBSD and libowfat does not allow to set unix time */ 272 time this often in FreeBSD and libowfat does not allow to set unix time */
238 taia_uint( &t, 0 ); /* Clear t */ 273 taia_uint(&t, 0); /* Clear t */
239 tai_unix( &(t.sec), (g_now_seconds + OT_CLIENT_TIMEOUT) ); 274 tai_unix(&(t.sec), (g_now_seconds + OT_CLIENT_TIMEOUT));
240 io_timeout( sock, t ); 275 io_timeout(sock, t);
241 } 276 }
242 io_eagain(serversocket); 277 io_eagain(serversocket);
243} 278}
244 279
245static void * server_mainloop( void * args ) { 280static void *server_mainloop(void *args) {
246 struct ot_workstruct ws; 281 struct ot_workstruct ws;
247 time_t next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL; 282 time_t next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL;
248 struct iovec *iovector; 283 struct iovec *iovector;
249 int iovec_entries; 284 int iovec_entries, is_partial;
250 285
251 (void)args; 286 (void)args;
252 287
253 /* Initialize our "thread local storage" */ 288 /* Initialize our "thread local storage" */
254 ws.inbuf = malloc( G_INBUF_SIZE ); 289 ws.inbuf = malloc(G_INBUF_SIZE);
255 ws.outbuf = malloc( G_OUTBUF_SIZE ); 290 ws.outbuf = malloc(G_OUTBUF_SIZE);
256#ifdef _DEBUG_HTTPERROR 291#ifdef _DEBUG_HTTPERROR
257 ws.debugbuf= malloc( G_DEBUGBUF_SIZE ); 292 ws.debugbuf = malloc(G_DEBUGBUF_SIZE);
258#endif 293#endif
259 if( !ws.inbuf || !ws.outbuf )
260 panic( "Initializing worker failed" );
261 294
262 for( ; ; ) { 295 if (!ws.inbuf || !ws.outbuf)
296 panic("Initializing worker failed");
297
298#ifdef WANT_ARC4RANDOM
299 arc4random_buf(&ws.rand48_state[0], 3 * sizeof(uint16_t));
300#else
301 ws.rand48_state[0] = (uint16_t)random();
302 ws.rand48_state[1] = (uint16_t)random();
303 ws.rand48_state[2] = (uint16_t)random();
304#endif
305
306 for (;;) {
263 int64 sock; 307 int64 sock;
264 308
265 io_wait(); 309 io_wait();
266 310
267 while( ( sock = io_canread( ) ) != -1 ) { 311 while ((sock = io_canread()) != -1) {
268 const void *cookie = io_getcookie( sock ); 312 const void *cookie = io_getcookie(sock);
269 if( (intptr_t)cookie == FLAG_TCP ) 313 if ((intptr_t)cookie == FLAG_TCP)
270 handle_accept( sock ); 314 handle_accept(sock);
271 else if( (intptr_t)cookie == FLAG_UDP ) 315 else if ((intptr_t)cookie == FLAG_UDP)
272 handle_udp6( sock, &ws ); 316 handle_udp6(sock, &ws);
273 else if( (intptr_t)cookie == FLAG_SELFPIPE ) { 317 else if ((intptr_t)cookie == FLAG_SELFPIPE)
274 io_tryread( sock, ws.inbuf, G_INBUF_SIZE ); 318 io_tryread(sock, ws.inbuf, G_INBUF_SIZE);
275 fprintf(stderr, "pipe\n"); 319 else
276 } else 320 handle_read(sock, &ws);
277 handle_read( sock, &ws );
278 } 321 }
279 322
280 while( ( sock = mutex_workqueue_popresult( &iovec_entries, &iovector ) ) != -1 ) 323 while ((sock = mutex_workqueue_popresult(&iovec_entries, &iovector, &is_partial)) != -1)
281 http_sendiovecdata( sock, &ws, iovec_entries, iovector ); 324 http_sendiovecdata(sock, &ws, iovec_entries, iovector, is_partial);
282 325
283 while( ( sock = io_canwrite( ) ) != -1 ) 326 while ((sock = io_canwrite()) != -1)
284 handle_write( sock ); 327 handle_write(sock);
285 328
286 if( g_now_seconds > next_timeout_check ) { 329 if (g_now_seconds > next_timeout_check) {
287 while( ( sock = io_timeouted() ) != -1 ) 330 while ((sock = io_timeouted()) != -1)
288 handle_dead( sock ); 331 handle_dead(sock);
289 next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL; 332 next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL;
290 } 333 }
291 334
292 livesync_ticker(); 335 livesync_ticker();
293
294 /* Enforce setting the clock */
295 signal_handler( SIGALRM );
296 } 336 }
297 return 0; 337 return 0;
298} 338}
299 339
300static int64_t ot_try_bind( ot_ip6 ip, uint16_t port, PROTO_FLAG proto ) { 340static int64_t ot_try_bind(ot_ip6 ip, uint16_t port, PROTO_FLAG proto) {
301 int64 sock = proto == FLAG_TCP ? socket_tcp6( ) : socket_udp6( ); 341 int64 sock = proto == FLAG_TCP ? socket_tcp6() : socket_udp6();
302
303#ifndef WANT_V6
304 if( !ip6_isv4mapped(ip) ) {
305 exerr( "V4 Tracker is V4 only!" );
306 }
307#else
308 if( ip6_isv4mapped(ip) ) {
309 exerr( "V6 Tracker is V6 only!" );
310 }
311#endif
312 342
313#ifdef _DEBUG 343#ifdef _DEBUG
314 { 344 {
315 char *protos[] = {"TCP","UDP","UDP mcast"}; 345 char *protos[] = {"TCP", "UDP", "UDP mcast"};
316 char _debug[512]; 346 char _debug[512];
317 int off = snprintf( _debug, sizeof(_debug), "Binding socket type %s to address [", protos[proto] ); 347 int off = snprintf(_debug, sizeof(_debug), "Binding socket type %s to address [", protos[proto]);
318 off += fmt_ip6c( _debug+off, ip); 348 off += fmt_ip6c(_debug + off, ip);
319 snprintf( _debug + off, sizeof(_debug)-off, "]:%d...", port); 349 snprintf(_debug + off, sizeof(_debug) - off, "]:%d...", port);
320 fputs( _debug, stderr ); 350 fputs(_debug, stderr);
321 } 351 }
322#endif 352#endif
323 353
324 if( socket_bind6_reuse( sock, ip, port, 0 ) == -1 ) 354 if (socket_bind6_reuse(sock, ip, port, 0) == -1)
325 panic( "socket_bind6_reuse" ); 355 panic("socket_bind6_reuse");
326 356
327 if( ( proto == FLAG_TCP ) && ( socket_listen( sock, SOMAXCONN) == -1 ) ) 357 if ((proto == FLAG_TCP) && (socket_listen(sock, SOMAXCONN) == -1))
328 panic( "socket_listen" ); 358 panic("socket_listen");
329 359
330 if( !io_fd( sock ) ) 360 if (!io_fd(sock))
331 panic( "io_fd" ); 361 panic("io_fd");
332 362
333 io_setcookie( sock, (void*)proto ); 363 io_setcookie(sock, (void *)proto);
334 364
335 if( (proto == FLAG_UDP) && g_udp_workers ) { 365 if ((proto == FLAG_UDP) && g_udp_workers) {
336 io_block( sock ); 366 io_block(sock);
337 udp_init( sock, g_udp_workers ); 367 udp_init(sock, g_udp_workers);
338 } else 368 } else
339 io_wantread( sock ); 369 io_wantread(sock);
340 370
341#ifdef _DEBUG 371#ifdef _DEBUG
342 fputs( " success.\n", stderr); 372 fputs(" success.\n", stderr);
343#endif 373#endif
344 374
345 return sock; 375 return sock;
346} 376}
347 377
348char * set_config_option( char **option, char *value ) { 378char *set_config_option(char **option, char *value) {
349#ifdef _DEBUG 379#ifdef _DEBUG
350 fprintf( stderr, "Setting config option: %s\n", value ); 380 fprintf(stderr, "Setting config option: %s\n", value);
351#endif 381#endif
352 while( isspace(*value) ) ++value; 382 while (isspace(*value))
353 free( *option ); 383 ++value;
354 return *option = strdup( value ); 384 free(*option);
385 return *option = strdup(value);
355} 386}
356 387
357static int scan_ip6_port( const char *src, ot_ip6 ip, uint16 *port ) { 388static int scan_ip6_port(const char *src, ot_ip6 ip, uint16 *port) {
358 const char *s = src; 389 const char *s = src;
359 int off, bracket = 0; 390 int off, bracket = 0;
360 while( isspace(*s) ) ++s; 391 while (isspace(*s))
361 if( *s == '[' ) ++s, ++bracket; /* for v6 style notation */ 392 ++s;
362 if( !(off = scan_ip6( s, ip ) ) ) 393 if (*s == '[')
394 ++s, ++bracket; /* for v6 style notation */
395 if (!(off = scan_ip6(s, ip)))
363 return 0; 396 return 0;
364 s += off; 397 s += off;
365 if( bracket && *s == ']' ) ++s; 398 if (bracket && *s == ']')
366 if( *s == 0 || isspace(*s)) return s-src; 399 ++s;
367 if( !ip6_isv4mapped(ip)){ 400 if (*s == 0 || isspace(*s))
368 if( *s != ':' && *s != '.' ) return 0; 401 return s - src;
369 if( !bracket && *(s) == ':' ) return 0; 402 if (!ip6_isv4mapped(ip)) {
403 if (*s != ':' && *s != '.')
404 return 0;
405 if (!bracket && *(s) == ':')
406 return 0;
370 s++; 407 s++;
371 } else { 408 } else {
372 if( *(s++) != ':' ) return 0; 409 if (*(s++) != ':')
410 return 0;
411 }
412 if (!(off = scan_ushort(s, port)))
413 return 0;
414 return off + s - src;
415}
416
417static int scan_ip6_net(const char *src, ot_net *net) {
418 const char *s = src;
419 int off;
420 while (isspace(*s))
421 ++s;
422 if (!(off = scan_ip6(s, net->address)))
423 return 0;
424 s += off;
425 if (*s != '/')
426 net->bits = 128;
427 else {
428 s++;
429 if (!(off = scan_int(s, &net->bits)))
430 return 0;
431 if (ip6_isv4mapped(net->address))
432 net->bits += 96;
433 if (net->bits > 128)
434 return 0;
435 s += off;
373 } 436 }
374 if( !(off = scan_ushort (s, port ) ) ) 437 return off + s - src;
375 return 0;
376 return off+s-src;
377} 438}
378 439
379int parse_configfile( char * config_filename ) { 440int parse_configfile(char *config_filename) {
380 FILE * accesslist_filehandle; 441 FILE *accesslist_filehandle;
381 char inbuf[512]; 442 char inbuf[512];
382 ot_ip6 tmpip; 443 ot_ip6 tmpip;
383 int bound = 0; 444#if defined(WANT_RESTRICT_STATS) || defined(WANT_IP_FROM_PROXY) || defined(WANT_SYNC_LIVE)
445 ot_net tmpnet;
446#endif
447 int bound = 0;
384 448
385 accesslist_filehandle = fopen( config_filename, "r" ); 449 accesslist_filehandle = fopen(config_filename, "r");
386 450
387 if( accesslist_filehandle == NULL ) { 451 if (accesslist_filehandle == NULL) {
388 fprintf( stderr, "Warning: Can't open config file: %s.", config_filename ); 452 fprintf(stderr, "Warning: Can't open config file: %s.", config_filename);
389 return 0; 453 return 0;
390 } 454 }
391 455
392 while( fgets( inbuf, sizeof(inbuf), accesslist_filehandle ) ) { 456 while (fgets(inbuf, sizeof(inbuf), accesslist_filehandle)) {
393 char *p = inbuf; 457 char *p = inbuf;
394 size_t strl; 458 size_t strl;
395 459
396 /* Skip white spaces */ 460 /* Skip white spaces */
397 while(isspace(*p)) ++p; 461 while (isspace(*p))
462 ++p;
398 463
399 /* Ignore comments and empty lines */ 464 /* Ignore comments and empty lines */
400 if((*p=='#')||(*p=='\n')||(*p==0)) continue; 465 if ((*p == '#') || (*p == '\n') || (*p == 0))
466 continue;
401 467
402 /* consume trailing new lines and spaces */ 468 /* consume trailing new lines and spaces */
403 strl = strlen(p); 469 strl = strlen(p);
404 while( strl && isspace(p[strl-1])) 470 while (strl && isspace(p[strl - 1]))
405 p[--strl] = 0; 471 p[--strl] = 0;
406 472
407 /* Scan for commands */ 473 /* Scan for commands */
408 if(!byte_diff(p,15,"tracker.rootdir" ) && isspace(p[15])) { 474 if (!byte_diff(p, 15, "tracker.rootdir") && isspace(p[15])) {
409 set_config_option( &g_serverdir, p+16 ); 475 set_config_option(&g_serverdir, p + 16);
410 } else if(!byte_diff(p,12,"tracker.user" ) && isspace(p[12])) { 476 } else if (!byte_diff(p, 12, "tracker.user") && isspace(p[12])) {
411 set_config_option( &g_serveruser, p+13 ); 477 set_config_option(&g_serveruser, p + 13);
412 } else if(!byte_diff(p,14,"listen.tcp_udp" ) && isspace(p[14])) { 478 } else if (!byte_diff(p, 14, "listen.tcp_udp") && isspace(p[14])) {
413 uint16_t tmpport = 6969; 479 uint16_t tmpport = 6969;
414 if( !scan_ip6_port( p+15, tmpip, &tmpport )) goto parse_error; 480 if (!scan_ip6_port(p + 15, tmpip, &tmpport))
415 ot_try_bind( tmpip, tmpport, FLAG_TCP ); ++bound; 481 goto parse_error;
416 ot_try_bind( tmpip, tmpport, FLAG_UDP ); ++bound; 482 ot_try_bind(tmpip, tmpport, FLAG_TCP);
417 } else if(!byte_diff(p,10,"listen.tcp" ) && isspace(p[10])) { 483 ++bound;
484 ot_try_bind(tmpip, tmpport, FLAG_UDP);
485 ++bound;
486 } else if (!byte_diff(p, 10, "listen.tcp") && isspace(p[10])) {
418 uint16_t tmpport = 6969; 487 uint16_t tmpport = 6969;
419 if( !scan_ip6_port( p+11, tmpip, &tmpport )) goto parse_error; 488 if (!scan_ip6_port(p + 11, tmpip, &tmpport))
420 ot_try_bind( tmpip, tmpport, FLAG_TCP ); 489 goto parse_error;
490 ot_try_bind(tmpip, tmpport, FLAG_TCP);
421 ++bound; 491 ++bound;
422 } else if(!byte_diff(p, 10, "listen.udp" ) && isspace(p[10])) { 492 } else if (!byte_diff(p, 10, "listen.udp") && isspace(p[10])) {
423 uint16_t tmpport = 6969; 493 uint16_t tmpport = 6969;
424 if( !scan_ip6_port( p+11, tmpip, &tmpport )) goto parse_error; 494 if (!scan_ip6_port(p + 11, tmpip, &tmpport))
425 ot_try_bind( tmpip, tmpport, FLAG_UDP ); 495 goto parse_error;
496 ot_try_bind(tmpip, tmpport, FLAG_UDP);
426 ++bound; 497 ++bound;
427 } else if(!byte_diff(p,18,"listen.udp.workers" ) && isspace(p[18])) { 498 } else if (!byte_diff(p, 18, "listen.udp.workers") && isspace(p[18])) {
428 char *value = p + 18; 499 char *value = p + 18;
429 while( isspace(*value) ) ++value; 500 while (isspace(*value))
430 scan_uint( value, &g_udp_workers ); 501 ++value;
502 scan_uint(value, &g_udp_workers);
431#ifdef WANT_ACCESSLIST_WHITE 503#ifdef WANT_ACCESSLIST_WHITE
432 } else if(!byte_diff(p, 16, "access.whitelist" ) && isspace(p[16])) { 504 } else if (!byte_diff(p, 16, "access.whitelist") && isspace(p[16])) {
433 set_config_option( &g_accesslist_filename, p+17 ); 505 set_config_option(&g_accesslist_filename, p + 17);
434#elif defined( WANT_ACCESSLIST_BLACK ) 506#elif defined(WANT_ACCESSLIST_BLACK)
435 } else if(!byte_diff(p, 16, "access.blacklist" ) && isspace(p[16])) { 507 } else if (!byte_diff(p, 16, "access.blacklist") && isspace(p[16])) {
436 set_config_option( &g_accesslist_filename, p+17 ); 508 set_config_option(&g_accesslist_filename, p + 17);
509#endif
510#ifdef WANT_DYNAMIC_ACCESSLIST
511 } else if (!byte_diff(p, 15, "access.fifo_add") && isspace(p[15])) {
512 set_config_option(&g_accesslist_pipe_add, p + 16);
513 } else if (!byte_diff(p, 18, "access.fifo_delete") && isspace(p[18])) {
514 set_config_option(&g_accesslist_pipe_delete, p + 19);
437#endif 515#endif
438#ifdef WANT_RESTRICT_STATS 516#ifdef WANT_RESTRICT_STATS
439 } else if(!byte_diff(p, 12, "access.stats" ) && isspace(p[12])) { 517 } else if (!byte_diff(p, 12, "access.stats") && isspace(p[12])) {
440 if( !scan_ip6( p+13, tmpip )) goto parse_error; 518 if (!scan_ip6_net(p + 13, &tmpnet))
441 accesslist_blessip( tmpip, OT_PERMISSION_MAY_STAT ); 519 goto parse_error;
520 accesslist_bless_net(&tmpnet, OT_PERMISSION_MAY_STAT);
442#endif 521#endif
443 } else if(!byte_diff(p, 17, "access.stats_path" ) && isspace(p[17])) { 522 } else if (!byte_diff(p, 17, "access.stats_path") && isspace(p[17])) {
444 set_config_option( &g_stats_path, p+18 ); 523 set_config_option(&g_stats_path, p + 18);
445#ifdef WANT_IP_FROM_PROXY 524#ifdef WANT_IP_FROM_PROXY
446 } else if(!byte_diff(p, 12, "access.proxy" ) && isspace(p[12])) { 525 } else if (!byte_diff(p, 12, "access.proxy") && isspace(p[12])) {
447 if( !scan_ip6( p+13, tmpip )) goto parse_error; 526 if (!scan_ip6_net(p + 13, &tmpnet))
448 accesslist_blessip( tmpip, OT_PERMISSION_MAY_PROXY ); 527 goto parse_error;
528 accesslist_bless_net(&tmpnet, OT_PERMISSION_MAY_PROXY);
449#endif 529#endif
450 } else if(!byte_diff(p, 20, "tracker.redirect_url" ) && isspace(p[20])) { 530 } else if (!byte_diff(p, 20, "tracker.redirect_url") && isspace(p[20])) {
451 set_config_option( &g_redirecturl, p+21 ); 531 set_config_option(&g_redirecturl, p + 21);
452#ifdef WANT_SYNC_LIVE 532#ifdef WANT_SYNC_LIVE
453 } else if(!byte_diff(p, 24, "livesync.cluster.node_ip" ) && isspace(p[24])) { 533 } else if (!byte_diff(p, 24, "livesync.cluster.node_ip") && isspace(p[24])) {
454 if( !scan_ip6( p+25, tmpip )) goto parse_error; 534 if (!scan_ip6_net(p + 25, &tmpnet))
455 accesslist_blessip( tmpip, OT_PERMISSION_MAY_LIVESYNC ); 535 goto parse_error;
456 } else if(!byte_diff(p, 23, "livesync.cluster.listen" ) && isspace(p[23])) { 536 accesslist_bless_net(&tmpnet, OT_PERMISSION_MAY_LIVESYNC);
537 } else if (!byte_diff(p, 23, "livesync.cluster.listen") && isspace(p[23])) {
457 uint16_t tmpport = LIVESYNC_PORT; 538 uint16_t tmpport = LIVESYNC_PORT;
458 if( !scan_ip6_port( p+24, tmpip, &tmpport )) goto parse_error; 539 if (!scan_ip6_port(p + 24, tmpip, &tmpport))
459 livesync_bind_mcast( tmpip, tmpport ); 540 goto parse_error;
541 livesync_bind_mcast(tmpip, tmpport);
460#endif 542#endif
461 } else 543 } else
462 fprintf( stderr, "Unhandled line in config file: %s\n", inbuf ); 544 fprintf(stderr, "Unhandled line in config file: %s\n", inbuf);
463 continue; 545 continue;
464 parse_error: 546 parse_error:
465 fprintf( stderr, "Parse error in config file: %s\n", inbuf); 547 fprintf(stderr, "Parse error in config file: %s\n", inbuf);
466 } 548 }
467 fclose( accesslist_filehandle ); 549 fclose(accesslist_filehandle);
468 return bound; 550 return bound;
469} 551}
470 552
471void load_state(const char * const state_filename ) { 553void load_state(const char *const state_filename) {
472 FILE * state_filehandle; 554 FILE *state_filehandle;
473 char inbuf[512]; 555 char inbuf[512];
474 ot_hash infohash; 556 ot_hash infohash;
475 unsigned long long base, downcount; 557 unsigned long long base, downcount;
476 int consumed; 558 int consumed;
477 559
478 state_filehandle = fopen( state_filename, "r" ); 560 state_filehandle = fopen(state_filename, "r");
479 561
480 if( state_filehandle == NULL ) { 562 if (state_filehandle == NULL) {
481 fprintf( stderr, "Warning: Can't open config file: %s.", state_filename ); 563 fprintf(stderr, "Warning: Can't open config file: %s.", state_filename);
482 return; 564 return;
483 } 565 }
484 566
485 /* We do ignore anything that is not of the form "^[:xdigit:]:\d+:\d+" */ 567 /* We do ignore anything that is not of the form "^[:xdigit:]:\d+:\d+" */
486 while( fgets( inbuf, sizeof(inbuf), state_filehandle ) ) { 568 while (fgets(inbuf, sizeof(inbuf), state_filehandle)) {
487 int i; 569 int i;
488 for( i=0; i<(int)sizeof(ot_hash); ++i ) { 570 for (i = 0; i < (int)sizeof(ot_hash); ++i) {
489 int eger = 16 * scan_fromhex( inbuf[ 2*i ] ) + scan_fromhex( inbuf[ 1 + 2*i ] ); 571 int eger = 16 * scan_fromhex(inbuf[2 * i]) + scan_fromhex(inbuf[1 + 2 * i]);
490 if( eger < 0 ) 572 if (eger < 0)
491 continue; 573 continue;
492 infohash[i] = eger; 574 infohash[i] = eger;
493 } 575 }
494 576
495 if( i != (int)sizeof(ot_hash) ) continue; 577 if (i != (int)sizeof(ot_hash))
578 continue;
496 i *= 2; 579 i *= 2;
497 580
498 if( inbuf[ i++ ] != ':' || !( consumed = scan_ulonglong( inbuf+i, &base ) ) ) continue; 581 if (inbuf[i++] != ':' || !(consumed = scan_ulonglong(inbuf + i, &base)))
582 continue;
499 i += consumed; 583 i += consumed;
500 if( inbuf[ i++ ] != ':' || !( consumed = scan_ulonglong( inbuf+i, &downcount ) ) ) continue; 584 if (inbuf[i++] != ':' || !(consumed = scan_ulonglong(inbuf + i, &downcount)))
501 add_torrent_from_saved_state( infohash, base, downcount ); 585 continue;
586 add_torrent_from_saved_state(infohash, base, downcount);
502 } 587 }
503 588
504 fclose( state_filehandle ); 589 fclose(state_filehandle);
505} 590}
506 591
507int drop_privileges ( const char * const serveruser, const char * const serverdir ) { 592int drop_privileges(const char *const serveruser, const char *const serverdir) {
508 struct passwd *pws = NULL; 593 struct passwd *pws = NULL;
509 594
510#ifdef _DEBUG 595#ifdef _DEBUG
511 if( !geteuid() ) 596 if (!geteuid())
512 fprintf( stderr, "Dropping to user %s.\n", serveruser ); 597 fprintf(stderr, "Dropping to user %s.\n", serveruser);
513 if( serverdir ) 598 if (serverdir)
514 fprintf( stderr, "ch%s'ing to directory %s.\n", geteuid() ? "dir" : "root", serverdir ); 599 fprintf(stderr, "ch%s'ing to directory %s.\n", geteuid() ? "dir" : "root", serverdir);
515#endif 600#endif
516 601
517 /* Grab pws entry before chrooting */ 602 /* Grab pws entry before chrooting */
518 pws = getpwnam( serveruser ); 603 pws = getpwnam(serveruser);
519 endpwent(); 604 endpwent();
520 605
521 if( geteuid() == 0 ) { 606 if (geteuid() == 0) {
522 /* Running as root: chroot and drop privileges */ 607 /* Running as root: chroot and drop privileges */
523 if( serverdir && chroot( serverdir ) ) { 608 if (serverdir && chroot(serverdir)) {
524 fprintf( stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno) ); 609 fprintf(stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno));
525 return -1; 610 return -1;
526 } 611 }
527 612
528 if(chdir("/")) 613 if (chdir("/"))
529 panic("chdir() failed after chrooting: "); 614 panic("chdir() failed after chrooting: ");
530 615
531 /* If we can't find server user, revert to nobody's default uid */ 616 /* If we can't find server user, revert to nobody's default uid */
532 if( !pws ) { 617 if (!pws) {
533 fprintf( stderr, "Warning: Could not get password entry for %s. Reverting to uid -2.\n", serveruser ); 618 fprintf(stderr, "Warning: Could not get password entry for %s. Reverting to uid -2.\n", serveruser);
534 setegid( (gid_t)-2 ); setgid( (gid_t)-2 ); 619 if (setegid((gid_t)-2) || setgid((gid_t)-2) || setuid((uid_t)-2) || seteuid((uid_t)-2))
535 setuid( (uid_t)-2 ); seteuid( (uid_t)-2 ); 620 panic("Could not set uid to value -2");
536 } 621 } else {
537 else { 622 if (setegid(pws->pw_gid) || setgid(pws->pw_gid) || setuid(pws->pw_uid) || seteuid(pws->pw_uid))
538 setegid( pws->pw_gid ); setgid( pws->pw_gid ); 623 panic("Could not set uid to specified value");
539 setuid( pws->pw_uid ); seteuid( pws->pw_uid );
540 } 624 }
541 625
542 if( geteuid() == 0 || getegid() == 0 ) 626 if (geteuid() == 0 || getegid() == 0)
543 panic("Still running with root privileges?!"); 627 panic("Still running with root privileges?!");
544 } 628 } else {
545 else {
546 /* Normal user, just chdir() */ 629 /* Normal user, just chdir() */
547 if( serverdir && chdir( serverdir ) ) { 630 if (serverdir && chdir(serverdir)) {
548 fprintf( stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno) ); 631 fprintf(stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno));
549 return -1; 632 return -1;
550 } 633 }
551 } 634 }
@@ -553,118 +636,173 @@ int drop_privileges ( const char * const serveruser, const char * const serverdi
553 return 0; 636 return 0;
554} 637}
555 638
556int main( int argc, char **argv ) { 639/* Maintain our copy of the clock. time() on BSDs is very expensive. */
557 ot_ip6 serverip, tmpip; 640static void *time_caching_worker(void *args) {
558 int bound = 0, scanon = 1; 641 (void)args;
559 uint16_t tmpport; 642 while (1) {
560 char * statefile = 0; 643 g_now_seconds = time(NULL);
644 sleep(5);
645 }
646 return NULL;
647}
561 648
562 memset( serverip, 0, sizeof(ot_ip6) ); 649int main(int argc, char **argv) {
563#ifndef WANT_V6 650 ot_ip6 serverip;
564 serverip[10]=serverip[11]=-1; 651 ot_net tmpnet;
565 noipv6=1; 652 int bound = 0, scanon = 1;
653 uint16_t tmpport;
654 char *statefile = 0;
655 pthread_t thread_id; /* time cacher */
656
657 memset(serverip, 0, sizeof(ot_ip6));
658#ifdef WANT_V4_ONLY
659 serverip[10] = serverip[11] = -1;
566#endif 660#endif
567 661
568#ifdef WANT_DEV_RANDOM 662#ifdef WANT_DEV_RANDOM
569 srandomdev(); 663 srandomdev();
570#else 664#else
571 srandom( time(NULL) ); 665 srandom(time(NULL));
572#endif 666#endif
573 667
574 while( scanon ) { 668 while (scanon) {
575 switch( getopt( argc, argv, ":i:p:A:P:d:u:r:s:f:l:v" 669 switch (getopt(argc, argv,
670 ":i:p:A:P:d:u:r:s:f:l:v"
576#ifdef WANT_ACCESSLIST_BLACK 671#ifdef WANT_ACCESSLIST_BLACK
577"b:" 672 "b:"
578#elif defined( WANT_ACCESSLIST_WHITE ) 673#elif defined(WANT_ACCESSLIST_WHITE)
579"w:" 674 "w:"
580#endif 675#endif
581 "h" ) ) { 676 "h")) {
582 case -1 : scanon = 0; break; 677 case -1:
583 case 'i': 678 scanon = 0;
584 if( !scan_ip6( optarg, serverip )) { usage( argv[0] ); exit( 1 ); } 679 break;
585 break; 680 case 'i':
681 if (!scan_ip6(optarg, serverip)) {
682 usage(argv[0]);
683 exit(1);
684 }
685 break;
586#ifdef WANT_ACCESSLIST_BLACK 686#ifdef WANT_ACCESSLIST_BLACK
587 case 'b': set_config_option( &g_accesslist_filename, optarg); break; 687 case 'b':
588#elif defined( WANT_ACCESSLIST_WHITE ) 688 set_config_option(&g_accesslist_filename, optarg);
589 case 'w': set_config_option( &g_accesslist_filename, optarg); break; 689 break;
690#elif defined(WANT_ACCESSLIST_WHITE)
691 case 'w':
692 set_config_option(&g_accesslist_filename, optarg);
693 break;
590#endif 694#endif
591 case 'p': 695 case 'p':
592 if( !scan_ushort( optarg, &tmpport)) { usage( argv[0] ); exit( 1 ); } 696 if (!scan_ushort(optarg, &tmpport)) {
593 ot_try_bind( serverip, tmpport, FLAG_TCP ); bound++; break; 697 usage(argv[0]);
594 case 'P': 698 exit(1);
595 if( !scan_ushort( optarg, &tmpport)) { usage( argv[0] ); exit( 1 ); } 699 }
596 ot_try_bind( serverip, tmpport, FLAG_UDP ); bound++; break; 700 ot_try_bind(serverip, tmpport, FLAG_TCP);
701 bound++;
702 break;
703 case 'P':
704 if (!scan_ushort(optarg, &tmpport)) {
705 usage(argv[0]);
706 exit(1);
707 }
708 ot_try_bind(serverip, tmpport, FLAG_UDP);
709 bound++;
710 break;
597#ifdef WANT_SYNC_LIVE 711#ifdef WANT_SYNC_LIVE
598 case 's': 712 case 's':
599 if( !scan_ushort( optarg, &tmpport)) { usage( argv[0] ); exit( 1 ); } 713 if (!scan_ushort(optarg, &tmpport)) {
600 livesync_bind_mcast( serverip, tmpport); break; 714 usage(argv[0]);
715 exit(1);
716 }
717 livesync_bind_mcast(serverip, tmpport);
718 break;
601#endif 719#endif
602 case 'd': set_config_option( &g_serverdir, optarg ); break; 720 case 'd':
603 case 'u': set_config_option( &g_serveruser, optarg ); break; 721 set_config_option(&g_serverdir, optarg);
604 case 'r': set_config_option( &g_redirecturl, optarg ); break; 722 break;
605 case 'l': statefile = optarg; break; 723 case 'u':
606 case 'A': 724 set_config_option(&g_serveruser, optarg);
607 if( !scan_ip6( optarg, tmpip )) { usage( argv[0] ); exit( 1 ); } 725 break;
608 accesslist_blessip( tmpip, 0xffff ); /* Allow everything for now */ 726 case 'r':
609 break; 727 set_config_option(&g_redirecturl, optarg);
610 case 'f': bound += parse_configfile( optarg ); break; 728 break;
611 case 'h': help( argv[0] ); exit( 0 ); 729 case 'l':
612 case 'v': { 730 statefile = optarg;
613 char buffer[8192]; 731 break;
614 stats_return_tracker_version( buffer ); 732 case 'A':
615 fputs( buffer, stderr ); 733 if (!scan_ip6_net(optarg, &tmpnet)) {
616 exit( 0 ); 734 usage(argv[0]);
735 exit(1);
617 } 736 }
618 default: 737 accesslist_bless_net(&tmpnet, 0xffff); /* Allow everything for now */
619 case '?': usage( argv[0] ); exit( 1 ); 738 break;
739 case 'f':
740 bound += parse_configfile(optarg);
741 break;
742 case 'h':
743 help(argv[0]);
744 exit(0);
745 case 'v': {
746 char buffer[8192];
747 stats_return_tracker_version(buffer);
748 fputs(buffer, stderr);
749 exit(0);
750 }
751 default:
752 case '?':
753 usage(argv[0]);
754 exit(1);
620 } 755 }
621 } 756 }
622 757
623 /* Bind to our default tcp/udp ports */ 758 /* Bind to our default tcp/udp ports */
624 if( !bound) { 759 if (!bound) {
625 ot_try_bind( serverip, 6969, FLAG_TCP ); 760 ot_try_bind(serverip, 6969, FLAG_TCP);
626 ot_try_bind( serverip, 6969, FLAG_UDP ); 761 ot_try_bind(serverip, 6969, FLAG_UDP);
627 } 762 }
628 763
764 defaul_signal_handlers();
765
629#ifdef WANT_SYSLOGS 766#ifdef WANT_SYSLOGS
630 openlog( "opentracker", 0, LOG_USER ); 767 openlog("opentracker", 0, LOG_USER);
631 setlogmask(LOG_UPTO(LOG_INFO)); 768 setlogmask(LOG_UPTO(LOG_INFO));
632#endif 769#endif
633 770
634 if( drop_privileges( g_serveruser ? g_serveruser : "nobody", g_serverdir ) == -1 ) 771 if (drop_privileges(g_serveruser ? g_serveruser : "nobody", g_serverdir) == -1)
635 panic( "drop_privileges failed, exiting. Last error"); 772 panic("drop_privileges failed, exiting. Last error");
636 773
637 g_now_seconds = time( NULL ); 774 g_now_seconds = time(NULL);
775 pthread_create(&thread_id, NULL, time_caching_worker, NULL);
638 776
639 /* Create our self pipe which allows us to interrupt mainloops 777 /* Create our self pipe which allows us to interrupt mainloops
640 io_wait in case some data is available to send out */ 778 io_wait in case some data is available to send out */
641 if( pipe( g_self_pipe ) == -1 ) 779 if (pipe(g_self_pipe) == -1)
642 panic( "selfpipe failed: " ); 780 panic("selfpipe failed: ");
643 if( !io_fd( g_self_pipe[0] ) ) 781 if (!io_fd(g_self_pipe[0]))
644 panic( "selfpipe io_fd failed: " ); 782 panic("selfpipe io_fd failed: ");
645 if( !io_fd( g_self_pipe[1] ) ) 783 if (!io_fd(g_self_pipe[1]))
646 panic( "selfpipe io_fd failed: " ); 784 panic("selfpipe io_fd failed: ");
647 io_setcookie( g_self_pipe[0], (void*)FLAG_SELFPIPE ); 785 io_setcookie(g_self_pipe[0], (void *)FLAG_SELFPIPE);
648 io_wantread( g_self_pipe[0] ); 786 io_wantread(g_self_pipe[0]);
649 787
650 defaul_signal_handlers( );
651 /* Init all sub systems. This call may fail with an exit() */ 788 /* Init all sub systems. This call may fail with an exit() */
652 trackerlogic_init( ); 789 trackerlogic_init();
653 790
654 if( statefile ) 791#ifdef _DEBUG_RANDOMTORRENTS
655 load_state( statefile ); 792 fprintf(stderr, "DEBUG: Generating %d random peers on random torrents. This may take a while. (Setting RANDOMTORRENTS in trackerlogic.h)\n", RANDOMTORRENTS);
793 trackerlogic_add_random_torrents(RANDOMTORRENTS);
794 fprintf(stderr, "... done.\n");
795#endif
656 796
657 install_signal_handlers( ); 797 if (statefile)
798 load_state(statefile);
658 799
659 if( !g_udp_workers ) 800 install_signal_handlers();
660 udp_init( -1, 0 );
661 801
662 /* Kick off our initial clock setting alarm */ 802 if (!g_udp_workers)
663 alarm(5); 803 udp_init(-1, 0);
664 804
665 server_mainloop( 0 ); 805 server_mainloop(0);
666 806
667 return 0; 807 return 0;
668} 808}
669
670const char *g_version_opentracker_c = "$Source$: $Revision$\n";
diff --git a/opentracker.conf.sample b/opentracker.conf.sample
index db45122..054e405 100644
--- a/opentracker.conf.sample
+++ b/opentracker.conf.sample
@@ -2,7 +2,7 @@
2# 2#
3 3
4# I) Address opentracker will listen on, using both, tcp AND udp family 4# I) Address opentracker will listen on, using both, tcp AND udp family
5# (note, that port 6969 is implicite if ommitted). 5# (note, that port 6969 is implicit if omitted).
6# 6#
7# If no listen option is given (here or on the command line), opentracker 7# If no listen option is given (here or on the command line), opentracker
8# listens on 0.0.0.0:6969 tcp and udp. 8# listens on 0.0.0.0:6969 tcp and udp.
@@ -44,17 +44,65 @@
44# listing, so choose one of those options at compile time. File format 44# listing, so choose one of those options at compile time. File format
45# is straight forward: "<hex info hash>\n<hex info hash>\n..." 45# is straight forward: "<hex info hash>\n<hex info hash>\n..."
46# 46#
47# IIa) You can enable dynamic changesets to accesslists by enabling
48# WANT_DYNAMIC_ACCESSLIST.
49#
50# The suggested way to work with dynamic changeset lists is to keep a
51# main accesslist file that is loaded when opentracker (re)starts and
52# reloaded infrequently (hourly or daily).
53#
54# All changes to the accesslist (e.g. from a web frontend) should be
55# both appended to or removed from that file and sent to opentracker. By
56# keeping dynamic changeset lists, you can avoid reloading huge
57# accesslists whenever just a single entry is added or removed.
58#
59# Any info_hash (format see above) written to the fifo_add file will be
60# kept on a dynamic add-changeset, removed from the dynamic
61# delete-changeset and treated as if it was in the main accesslist file.
62# The semantic of the respective dynamic changeset depends on whether
63# WANT_ACCESSLIST_WHITE or WANT_ACCESSLIST_BLACK is enabled.
64#
65# access.fifo_add /var/run/opentracker/adder.fifo
66#
67# Any info_hash (format see above) written to the fifo_delete file will
68# be kept on a dynamic delete-changeset, removed from the dynamic
69# add-changeset and treated as if it was not in the main accesslist
70# file.
71#
72# access.fifo_delete /var/run/opentracker/deleter.fifo
73#
74# If you reload the accesslist by sending SIGHUP to the tracker process,
 75# the dynamic lists are flushed, as opentracker assumes those lists are
76# merged into the main accesslist.
77#
78# NOTE: While you can have multiple writers sending lines to the fifos,
79# any writes larger than PIPE_BUF (see your limits.h, minimally 512
80# bytes but usually 4096) may be interleaved with data sent by other
81# writers. This can lead to unparsable lines of info_hashes.
82#
83# IIb)
47# If you do not want to grant anyone access to your stats, enable the 84# If you do not want to grant anyone access to your stats, enable the
48# WANT_RESTRICT_STATS option in Makefile and bless the ip addresses 85# WANT_RESTRICT_STATS option in Makefile and bless the ip addresses
49# allowed to fetch stats here. 86# or network allowed to fetch stats here.
50# 87#
51# access.stats 192.168.0.23 88# access.stats 192.168.0.23
89# access.stats 10.1.1.23
52# 90#
53# There is another way of hiding your stats. You can obfuscate the path 91# There is another way of hiding your stats. You can obfuscate the path
54# to them. Normally it is located at /stats but you can configure it to 92# to them. Normally it is located at /stats but you can configure it to
55# appear anywhere on your tracker. 93# appear anywhere on your tracker.
56# 94#
57# access.stats_path stats 95# access.stats_path stats
96#
 97# IIc)
98# If opentracker lives behind one or multiple reverse proxies,
99# every http connection appears to come from these proxies. In order to
100# take the X-Forwarded-For address instead, compile opentracker with the
 101# WANT_IP_FROM_PROXY option and set your proxy addresses or networks here.
102#
103# access.proxy 10.0.1.23
104# access.proxy 192.0.0.0/8
105#
58 106
59# III) Live sync uses udp multicast packets to keep a cluster of opentrackers 107# III) Live sync uses udp multicast packets to keep a cluster of opentrackers
60# synchronized. This option tells opentracker which port to listen for 108# synchronized. This option tells opentracker which port to listen for
diff --git a/ot_accesslist.c b/ot_accesslist.c
index cdb964d..4b88c40 100644
--- a/ot_accesslist.c
+++ b/ot_accesslist.c
@@ -5,104 +5,201 @@
5 5
6/* System */ 6/* System */
7#include <pthread.h> 7#include <pthread.h>
8#include <signal.h>
9#include <stdio.h>
8#include <stdlib.h> 10#include <stdlib.h>
9#include <string.h> 11#include <string.h>
10#include <stdio.h>
11#include <signal.h>
12#include <unistd.h> 12#include <unistd.h>
13#ifdef WANT_DYNAMIC_ACCESSLIST
14#include <errno.h>
15#include <sys/stat.h>
16#include <sys/types.h>
17#endif
13 18
14/* Libowfat */ 19/* Libowfat */
15#include "byte.h" 20#include "byte.h"
16#include "scan.h" 21#include "fmt.h"
17#include "ip6.h" 22#include "ip6.h"
18#include "mmap.h" 23#include "mmap.h"
24#include "scan.h"
19 25
20/* Opentracker */ 26/* Opentracker */
21#include "trackerlogic.h"
22#include "ot_accesslist.h" 27#include "ot_accesslist.h"
23#include "ot_vector.h" 28#include "ot_vector.h"
29#include "trackerlogic.h"
24 30
25/* GLOBAL VARIABLES */ 31/* GLOBAL VARIABLES */
26#ifdef WANT_ACCESSLIST 32#ifdef WANT_ACCESSLIST
27 char *g_accesslist_filename; 33char *g_accesslist_filename = NULL;
28static ot_hash *g_accesslist; 34#ifdef WANT_DYNAMIC_ACCESSLIST
29static size_t g_accesslist_size; 35char *g_accesslist_pipe_add = NULL;
36char *g_accesslist_pipe_delete = NULL;
37#endif
30static pthread_mutex_t g_accesslist_mutex; 38static pthread_mutex_t g_accesslist_mutex;
31 39
32static int vector_compare_hash(const void *hash1, const void *hash2 ) { 40/* Accesslists are lock free linked lists. We can not make them locking, because every announce
33 return memcmp( hash1, hash2, OT_HASH_COMPARE_SIZE ); 41 would try to acquire the mutex, making it the most contested mutex in the whole of opentracker,
42 basically creating a central performance choke point.
43
44 The idea is that updating the list heads happens under the g_accesslist_mutex guard and is
45 done atomically, while consumers might potentially still hold pointers deeper inside the list.
46
47 Consumers (for now only via accesslist_hashisvalid) will always fetch the list head pointer
48 that is guaranteed to live for at least five minutes. This should be many orders of magnitudes
49 more than how long it will be needed by the bsearch done on the list. */
50struct ot_accesslist;
51typedef struct ot_accesslist ot_accesslist;
52struct ot_accesslist {
53 ot_hash *list;
54 size_t size;
55 ot_time base;
56 ot_accesslist *next;
57};
58static ot_accesslist *_Atomic g_accesslist = NULL;
59#ifdef WANT_DYNAMIC_ACCESSLIST
60static ot_accesslist *_Atomic g_accesslist_add = NULL;
61static ot_accesslist *_Atomic g_accesslist_delete = NULL;
62#endif
63
64/* Helpers to work on access lists */
65static int vector_compare_hash(const void *hash1, const void *hash2) { return memcmp(hash1, hash2, OT_HASH_COMPARE_SIZE); }
66
67static ot_accesslist *accesslist_free(ot_accesslist *accesslist) {
68 while (accesslist) {
69 ot_accesslist *this_accesslist = accesslist;
70 accesslist = this_accesslist->next;
71 free(this_accesslist->list);
72 free(this_accesslist);
73 }
74 return NULL;
75}
76
77static ot_accesslist *accesslist_make(ot_accesslist *next, size_t size) {
78 ot_accesslist *accesslist_new = malloc(sizeof(ot_accesslist));
79 if (accesslist_new) {
80 accesslist_new->list = size ? malloc(sizeof(ot_hash) * size) : NULL;
81 accesslist_new->size = size;
82 accesslist_new->base = g_now_minutes;
83 accesslist_new->next = next;
84 if (size && !accesslist_new->list) {
85 free(accesslist_new);
86 accesslist_new = NULL;
87 }
88 }
89 return accesslist_new;
90}
91
92/* This must be called with g_accesslist_mutex held.
93 This will never delete head, because that might still be in use. */
94static void accesslist_clean(ot_accesslist *accesslist) {
95 while (accesslist && accesslist->next) {
96 if (accesslist->next->base + 5 < g_now_minutes)
97 accesslist->next = accesslist_free(accesslist->next);
98 accesslist = accesslist->next;
99 }
34} 100}
35 101
36/* Read initial access list */ 102/* Read initial access list */
37static void accesslist_readfile( void ) { 103static void accesslist_readfile(void) {
38 ot_hash *info_hash, *accesslist_new = NULL; 104 ot_accesslist *accesslist_new;
39 char *map, *map_end, *read_offs; 105 ot_hash *info_hash;
40 size_t maplen; 106 const char *map, *map_end, *read_offs;
41 107 size_t maplen;
42 if( ( map = mmap_read( g_accesslist_filename, &maplen ) ) == NULL ) { 108
43 char *wd = getcwd( NULL, 0 ); 109 if ((map = mmap_read(g_accesslist_filename, &maplen)) == NULL) {
44 fprintf( stderr, "Warning: Can't open accesslist file: %s (but will try to create it later, if necessary and possible).\nPWD: %s\n", g_accesslist_filename, wd ); 110 char *wd = getcwd(NULL, 0);
45 free( wd ); 111 fprintf(stderr, "Warning: Can't open accesslist file: %s (but will try to create it later, if necessary and possible).\nPWD: %s\n", g_accesslist_filename, wd);
112 free(wd);
46 return; 113 return;
47 } 114 }
48 115
49 /* You need at least 41 bytes to pass an info_hash, make enough room 116 /* You need at least 41 bytes to pass an info_hash, make enough room
50 for the maximum amount of them */ 117 for the maximum amount of them */
51 info_hash = accesslist_new = malloc( ( maplen / 41 ) * 20 ); 118 accesslist_new = accesslist_make(g_accesslist, maplen / 41);
52 if( !accesslist_new ) { 119 if (!accesslist_new) {
53 fprintf( stderr, "Warning: Not enough memory to allocate %zd bytes for accesslist buffer. May succeed later.\n", ( maplen / 41 ) * 20 ); 120 fprintf(stderr, "Warning: Not enough memory to allocate %zd bytes for accesslist buffer. May succeed later.\n", (maplen / 41) * 20);
121 mmap_unmap(map, maplen);
54 return; 122 return;
55 } 123 }
124 info_hash = accesslist_new->list;
56 125
57 /* No use to scan if there's not enough room for another full info_hash */ 126 /* No use to scan if there's not enough room for another full info_hash */
58 map_end = map + maplen - 40; 127 map_end = map + maplen - 40;
59 read_offs = map; 128 read_offs = map;
60 129
61 /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*" */ 130 /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*" */
62 while( read_offs <= map_end ) { 131 while (read_offs <= map_end) {
63 int i; 132 int i;
64 for( i=0; i<(int)sizeof(ot_hash); ++i ) { 133 for (i = 0; i < (int)sizeof(ot_hash); ++i) {
65 int eger1 = scan_fromhex( read_offs[ 2*i ] ); 134 int eger1 = scan_fromhex((unsigned char)read_offs[2 * i]);
66 int eger2 = scan_fromhex( read_offs[ 1 + 2*i ] ); 135 int eger2 = scan_fromhex((unsigned char)read_offs[1 + 2 * i]);
67 if( eger1 < 0 || eger2 < 0 ) 136 if (eger1 < 0 || eger2 < 0)
68 break; 137 break;
69 (*info_hash)[i] = eger1 * 16 + eger2; 138 (*info_hash)[i] = (uint8_t)(eger1 * 16 + eger2);
70 } 139 }
71 140
72 if( i == sizeof(ot_hash) ) { 141 if (i == sizeof(ot_hash)) {
73 read_offs += 40; 142 read_offs += 40;
74 143
75 /* Append accesslist to accesslist vector */ 144 /* Append accesslist to accesslist vector */
76 if( read_offs == map_end || scan_fromhex( *read_offs ) < 0 ) 145 if (read_offs == map_end || scan_fromhex((unsigned char)*read_offs) < 0)
77 ++info_hash; 146 ++info_hash;
78 } 147 }
79 148
80 /* Find start of next line */ 149 /* Find start of next line */
81 while( read_offs <= map_end && *(read_offs++) != '\n' ); 150 while (read_offs <= map_end && *(read_offs++) != '\n')
151 ;
82 } 152 }
83#ifdef _DEBUG 153#ifdef _DEBUG
84 fprintf( stderr, "Added %zd info_hashes to accesslist\n", (size_t)(info_hash - accesslist_new) ); 154 fprintf(stderr, "Added %zd info_hashes to accesslist\n", (size_t)(info_hash - accesslist_new->list));
85#endif 155#endif
86 156
87 mmap_unmap( map, maplen); 157 mmap_unmap(map, maplen);
88 158
89 qsort( accesslist_new, info_hash - accesslist_new, sizeof( *info_hash ), vector_compare_hash ); 159 qsort(accesslist_new->list, info_hash - accesslist_new->list, sizeof(*info_hash), vector_compare_hash);
160 accesslist_new->size = info_hash - accesslist_new->list;
90 161
91 /* Now exchange the accesslist vector in the least race condition prone way */ 162 /* Now exchange the accesslist vector in the least race condition prone way */
92 pthread_mutex_lock(&g_accesslist_mutex); 163 pthread_mutex_lock(&g_accesslist_mutex);
93 free( g_accesslist ); 164 accesslist_new->next = g_accesslist;
94 g_accesslist = accesslist_new; 165 g_accesslist = accesslist_new; /* Only now set a new list */
95 g_accesslist_size = info_hash - accesslist_new; 166
167#ifdef WANT_DYNAMIC_ACCESSLIST
168 /* If we have dynamic accesslists, reloading a new one will always void the add/delete lists.
169 Insert empty ones at the list head */
170 if (g_accesslist_add && (accesslist_new = accesslist_make(g_accesslist_add, 0)) != NULL)
171 g_accesslist_add = accesslist_new;
172 if (g_accesslist_delete && (accesslist_new = accesslist_make(g_accesslist_delete, 0)) != NULL)
173 g_accesslist_delete = accesslist_new;
174#endif
175
176 accesslist_clean(g_accesslist);
177
96 pthread_mutex_unlock(&g_accesslist_mutex); 178 pthread_mutex_unlock(&g_accesslist_mutex);
97} 179}
98 180
99int accesslist_hashisvalid( ot_hash hash ) { 181int accesslist_hashisvalid(ot_hash hash) {
100 void *exactmatch; 182 /* Get working copy of current access list */
183 ot_accesslist *accesslist = g_accesslist;
184#ifdef WANT_DYNAMIC_ACCESSLIST
185 ot_accesslist *accesslist_add, *accesslist_delete;
186#endif
187 void *exactmatch = NULL;
101 188
102 /* Lock should hardly ever be contended */ 189 if (accesslist)
103 pthread_mutex_lock(&g_accesslist_mutex); 190 exactmatch = bsearch(hash, accesslist->list, accesslist->size, OT_HASH_COMPARE_SIZE, vector_compare_hash);
104 exactmatch = bsearch( hash, g_accesslist, g_accesslist_size, OT_HASH_COMPARE_SIZE, vector_compare_hash ); 191
105 pthread_mutex_unlock(&g_accesslist_mutex); 192#ifdef WANT_DYNAMIC_ACCESSLIST
193 /* If we had no match on the main list, scan the list of dynamically added hashes */
194 accesslist_add = g_accesslist_add;
195 if ((exactmatch == NULL) && accesslist_add)
196 exactmatch = bsearch(hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash);
197
198 /* If we found a matching hash on the main list, scan the list of dynamically deleted hashes */
199 accesslist_delete = g_accesslist_delete;
200 if ((exactmatch != NULL) && accesslist_delete && bsearch(hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash))
201 exactmatch = NULL;
202#endif
106 203
107#ifdef WANT_ACCESSLIST_BLACK 204#ifdef WANT_ACCESSLIST_BLACK
108 return exactmatch == NULL; 205 return exactmatch == NULL;
@@ -111,70 +208,210 @@ int accesslist_hashisvalid( ot_hash hash ) {
111#endif 208#endif
112} 209}
113 210
114static void * accesslist_worker( void * args ) { 211static void *accesslist_worker(void *args) {
115 int sig; 212 int sig;
116 sigset_t signal_mask; 213 sigset_t signal_mask;
117 214
118 sigemptyset(&signal_mask); 215 sigemptyset(&signal_mask);
119 sigaddset(&signal_mask, SIGHUP); 216 sigaddset(&signal_mask, SIGHUP);
120 217
121 (void)args; 218 (void)args;
122 219
123 while( 1 ) { 220 while (1) {
221 if (!g_opentracker_running)
222 return NULL;
124 223
125 /* Initial attempt to read accesslist */ 224 /* Initial attempt to read accesslist */
126 accesslist_readfile( ); 225 accesslist_readfile();
127 226
128 /* Wait for signals */ 227 /* Wait for signals */
129 while( sigwait (&signal_mask, &sig) != 0 && sig != SIGHUP ); 228 while (sigwait(&signal_mask, &sig) != 0 && sig != SIGHUP)
229 ;
230 }
231 return NULL;
232}
233
234#ifdef WANT_DYNAMIC_ACCESSLIST
235static pthread_t thread_adder_id, thread_deleter_id;
236static void *accesslist_adddel_worker(char *fifoname, ot_accesslist *_Atomic *adding_to, ot_accesslist *_Atomic *removing_from) {
237 struct stat st;
238
239 if (!stat(fifoname, &st)) {
240 if (!S_ISFIFO(st.st_mode)) {
241 fprintf(stderr, "Error when starting dynamic accesslists: Found Non-FIFO file at %s.\nPlease remove it and restart opentracker.\n", fifoname);
242 return NULL;
243 }
244 } else {
245 int error = mkfifo(fifoname, 0755);
246 if (error && error != EEXIST) {
247 fprintf(stderr, "Error when starting dynamic accesslists: Couldn't create FIFO at %s, error: %s\n", fifoname, strerror(errno));
248 return NULL;
249 }
250 }
251
252 while (g_opentracker_running) {
253 FILE *fifo = fopen(fifoname, "r");
254 char *line = NULL;
255 size_t linecap = 0;
256 ssize_t linelen;
257
258 if (!fifo) {
259 fprintf(stderr, "Error when reading dynamic accesslists: Couldn't open FIFO at %s, error: %s\n", fifoname, strerror(errno));
260 return NULL;
261 }
262
263 while ((linelen = getline(&line, &linecap, fifo)) > 0) {
264 ot_hash info_hash;
265 int i;
266
267 printf("Got line %*s", (int)linelen, line);
268 /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*"
269 If there's not enough characters for an info_hash in the line, skip it. */
270 if (linelen < 41)
271 continue;
272
273 for (i = 0; i < (int)sizeof(ot_hash); ++i) {
274 int eger1 = scan_fromhex((unsigned char)line[2 * i]);
275 int eger2 = scan_fromhex((unsigned char)line[1 + 2 * i]);
276 if (eger1 < 0 || eger2 < 0)
277 break;
278 ((uint8_t *)info_hash)[i] = (uint8_t)(eger1 * 16 + eger2);
279 }
280 printf("parsed info_hash %20s\n", info_hash);
281 if (i != sizeof(ot_hash))
282 continue;
283
284 /* From now on we modify g_accesslist_add and g_accesslist_delete, so prevent the
285 other worker threads from doing the same */
286 pthread_mutex_lock(&g_accesslist_mutex);
287
288 /* If the info hash is in the removing_from list, create a new head without that entry */
289 if (*removing_from && (*removing_from)->list) {
290 ot_hash *exactmatch = bsearch(info_hash, (*removing_from)->list, (*removing_from)->size, OT_HASH_COMPARE_SIZE, vector_compare_hash);
291 if (exactmatch) {
292 ptrdiff_t off = exactmatch - (*removing_from)->list;
293 ot_accesslist *accesslist_new = accesslist_make(*removing_from, (*removing_from)->size - 1);
294 if (accesslist_new) {
295 memcpy(accesslist_new->list, (*removing_from)->list, sizeof(ot_hash) * off);
296 memcpy(accesslist_new->list + off, (*removing_from)->list + off + 1, (*removing_from)->size - off - 1);
297 *removing_from = accesslist_new;
298 }
299 }
300 }
301
302 /* Simple case: there's no adding_to list yet, create one with one member */
303 if (!*adding_to) {
304 ot_accesslist *accesslist_new = accesslist_make(NULL, 1);
305 if (accesslist_new) {
306 memcpy(accesslist_new->list, info_hash, sizeof(ot_hash));
307 *adding_to = accesslist_new;
308 }
309 } else {
310 int exactmatch = 0;
311 ot_hash *insert_point = binary_search(info_hash, (*adding_to)->list, (*adding_to)->size, OT_HASH_COMPARE_SIZE, sizeof(ot_hash), &exactmatch);
312
313 /* Only if the info hash is not in the adding_to list, create a new head with that entry */
314 if (!exactmatch) {
315 ot_accesslist *accesslist_new = accesslist_make(*adding_to, (*adding_to)->size + 1);
316 ptrdiff_t off = insert_point - (*adding_to)->list;
317 if (accesslist_new) {
318 memcpy(accesslist_new->list, (*adding_to)->list, sizeof(ot_hash) * off);
319 memcpy(accesslist_new->list + off, info_hash, sizeof(info_hash));
320 memcpy(accesslist_new->list + off + 1, (*adding_to)->list + off, (*adding_to)->size - off);
321 *adding_to = accesslist_new;
322 }
323 }
324 }
325
326 pthread_mutex_unlock(&g_accesslist_mutex);
327 }
328
329 fclose(fifo);
130 } 330 }
131 return NULL; 331 return NULL;
132} 332}
133 333
334static void *accesslist_adder_worker(void *args) {
335 (void)args;
336 return accesslist_adddel_worker(g_accesslist_pipe_add, &g_accesslist_add, &g_accesslist_delete);
337}
338static void *accesslist_deleter_worker(void *args) {
339 (void)args;
340 return accesslist_adddel_worker(g_accesslist_pipe_delete, &g_accesslist_delete, &g_accesslist_add);
341}
342#endif
343
134static pthread_t thread_id; 344static pthread_t thread_id;
135void accesslist_init( ) { 345void accesslist_init() {
136 pthread_mutex_init(&g_accesslist_mutex, NULL); 346 pthread_mutex_init(&g_accesslist_mutex, NULL);
137 pthread_create( &thread_id, NULL, accesslist_worker, NULL ); 347 pthread_create(&thread_id, NULL, accesslist_worker, NULL);
348#ifdef WANT_DYNAMIC_ACCESSLIST
349 if (g_accesslist_pipe_add)
350 pthread_create(&thread_adder_id, NULL, accesslist_adder_worker, NULL);
351 if (g_accesslist_pipe_delete)
352 pthread_create(&thread_deleter_id, NULL, accesslist_deleter_worker, NULL);
353#endif
138} 354}
139 355
140void accesslist_deinit( void ) { 356void accesslist_deinit(void) {
141 pthread_cancel( thread_id ); 357 /* Wake up sleeping worker */
358 pthread_kill(thread_id, SIGHUP);
359
360 pthread_mutex_lock(&g_accesslist_mutex);
361
362 g_accesslist = accesslist_free(g_accesslist);
363
364#ifdef WANT_DYNAMIC_ACCESSLIST
365 g_accesslist_add = accesslist_free(g_accesslist_add);
366 g_accesslist_delete = accesslist_free(g_accesslist_delete);
367#endif
368
369 pthread_mutex_unlock(&g_accesslist_mutex);
370 pthread_cancel(thread_id);
142 pthread_mutex_destroy(&g_accesslist_mutex); 371 pthread_mutex_destroy(&g_accesslist_mutex);
143 free( g_accesslist ); 372}
144 g_accesslist = 0; 373
145 g_accesslist_size = 0; 374void accesslist_cleanup(void) {
375 pthread_mutex_lock(&g_accesslist_mutex);
376
377 accesslist_clean(g_accesslist);
378#if WANT_DYNAMIC_ACCESSLIST
379 accesslist_clean(g_accesslist_add);
380 accesslist_clean(g_accesslist_delete);
381#endif
382
383 pthread_mutex_unlock(&g_accesslist_mutex);
146} 384}
147#endif 385#endif
148 386
149int address_in_net( const ot_ip6 address, const ot_net *net ) { 387int address_in_net(const ot_ip6 address, const ot_net *net) {
150 int bits = net->bits; 388 int bits = net->bits, checkbits = (0x7f00 >> (bits & 7));
151 int result = memcmp( address, &net->address, bits >> 3 ); 389 int result = memcmp(address, &net->address, bits >> 3);
152 if( !result && ( bits & 7 ) ) 390 if (!result && (bits & 7))
153 result = ( ( 0x7f00 >> ( bits & 7 ) ) & address[bits>>3] ) - net->address[bits>>3]; 391 result = (checkbits & address[bits >> 3]) - (checkbits & net->address[bits >> 3]);
154 return result == 0; 392 return result == 0;
155} 393}
156 394
157void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value, const size_t member_size ) { 395void *set_value_for_net(const ot_net *net, ot_vector *vector, const void *value, const size_t member_size) {
158 size_t i; 396 size_t i;
159 int exactmatch; 397 int exactmatch;
160 398
161 /* Caller must have a concept of ot_net in it's member */ 399 /* Caller must have a concept of ot_net in it's member */
162 if( member_size < sizeof(ot_net) ) 400 if (member_size < sizeof(ot_net))
163 return 0; 401 return 0;
164 402
165 /* Check each net in vector for overlap */ 403 /* Check each net in vector for overlap */
166 uint8_t *member = ((uint8_t*)vector->data); 404 uint8_t *member = ((uint8_t *)vector->data);
167 for( i=0; i<vector->size; ++i ) { 405 for (i = 0; i < vector->size; ++i) {
168 if( address_in_net( *(ot_ip6*)member, net ) || 406 if (address_in_net(*(ot_ip6 *)member, net) || address_in_net(net->address, (ot_net *)member))
169 address_in_net( net->address, (ot_net*)member ) )
170 return 0; 407 return 0;
171 member += member_size; 408 member += member_size;
172 } 409 }
173 410
174 member = vector_find_or_insert( vector, (void*)net, member_size, sizeof(ot_net), &exactmatch ); 411 member = vector_find_or_insert(vector, (void *)net, member_size, sizeof(ot_net), &exactmatch);
175 if( member ) { 412 if (member) {
176 memcpy( member, net, sizeof(ot_net)); 413 memcpy(member, net, sizeof(ot_net));
177 memcpy( member + sizeof(ot_net), value, member_size - sizeof(ot_net)); 414 memcpy(member + sizeof(ot_net), value, member_size - sizeof(ot_net));
178 } 415 }
179 416
180 return member; 417 return member;
@@ -182,43 +419,43 @@ void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value
182 419
183/* Takes a vector filled with { ot_net net, uint8_t[x] value }; 420/* Takes a vector filled with { ot_net net, uint8_t[x] value };
184 Returns value associated with the net, or NULL if not found */ 421 Returns value associated with the net, or NULL if not found */
185void *get_value_for_net( const ot_ip6 address, const ot_vector *vector, const size_t member_size ) { 422void *get_value_for_net(const ot_ip6 address, const ot_vector *vector, const size_t member_size) {
186 int exactmatch; 423 int exactmatch;
187 /* This binary search will return a pointer to the first non-containing network... */ 424 /* This binary search will return a pointer to the first non-containing network... */
188 ot_net *net = binary_search( address, vector->data, vector->size, member_size, sizeof(ot_ip6), &exactmatch ); 425 ot_net *net = binary_search(address, vector->data, vector->size, member_size, sizeof(ot_ip6), &exactmatch);
189 if( !net ) 426 if (!net)
190 return NULL; 427 return NULL;
191 /* ... so we'll need to move back one step unless we've exactly hit the first address in network */ 428 /* ... so we'll need to move back one step unless we've exactly hit the first address in network */
192 if( !exactmatch && ( (void*)net > vector->data ) ) 429 if (!exactmatch && ((void *)net > vector->data))
193 --net; 430 --net;
194 if( !address_in_net( address, net ) ) 431 if (!address_in_net(address, net))
195 return NULL; 432 return NULL;
196 return (void*)net; 433 return (void *)net;
197} 434}
198 435
199#ifdef WANT_FULLLOG_NETWORKS 436#ifdef WANT_FULLLOG_NETWORKS
200static ot_vector g_lognets_list; 437static ot_vector g_lognets_list;
201ot_log *g_logchain_first, *g_logchain_last; 438ot_log *g_logchain_first, *g_logchain_last;
202
203static pthread_mutex_t g_lognets_list_mutex = PTHREAD_MUTEX_INITIALIZER; 439static pthread_mutex_t g_lognets_list_mutex = PTHREAD_MUTEX_INITIALIZER;
204void loglist_add_network( const ot_net *net ) { 440
441void loglist_add_network(const ot_net *net) {
205 pthread_mutex_lock(&g_lognets_list_mutex); 442 pthread_mutex_lock(&g_lognets_list_mutex);
206 set_value_for_net( net, &g_lognets_list, NULL, sizeof(ot_net)); 443 set_value_for_net(net, &g_lognets_list, NULL, sizeof(ot_net));
207 pthread_mutex_unlock(&g_lognets_list_mutex); 444 pthread_mutex_unlock(&g_lognets_list_mutex);
208} 445}
209 446
210void loglist_reset( ) { 447void loglist_reset() {
211 pthread_mutex_lock(&g_lognets_list_mutex); 448 pthread_mutex_lock(&g_lognets_list_mutex);
212 free( g_lognets_list.data ); 449 free(g_lognets_list.data);
213 g_lognets_list.data = 0; 450 g_lognets_list.data = 0;
214 g_lognets_list.size = g_lognets_list.space = 0; 451 g_lognets_list.size = g_lognets_list.space = 0;
215 pthread_mutex_unlock(&g_lognets_list_mutex); 452 pthread_mutex_unlock(&g_lognets_list_mutex);
216} 453}
217 454
218int loglist_check_address( const ot_ip6 address ) { 455int loglist_check_address(const ot_ip6 address) {
219 int result; 456 int result;
220 pthread_mutex_lock(&g_lognets_list_mutex); 457 pthread_mutex_lock(&g_lognets_list_mutex);
221 result = ( NULL != get_value_for_net( address, &g_lognets_list, sizeof(ot_net)) ); 458 result = (NULL != get_value_for_net(address, &g_lognets_list, sizeof(ot_net)));
222 pthread_mutex_unlock(&g_lognets_list_mutex); 459 pthread_mutex_unlock(&g_lognets_list_mutex);
223 return result; 460 return result;
224} 461}
@@ -226,44 +463,44 @@ int loglist_check_address( const ot_ip6 address ) {
226 463
227#ifdef WANT_IP_FROM_PROXY 464#ifdef WANT_IP_FROM_PROXY
228typedef struct { 465typedef struct {
229 ot_net *proxy; 466 ot_net *proxy;
230 ot_vector networks; 467 ot_vector networks;
231} ot_proxymap; 468} ot_proxymap;
232 469
233static ot_vector g_proxies_list; 470static ot_vector g_proxies_list;
234static pthread_mutex_t g_proxies_list_mutex = PTHREAD_MUTEX_INITIALIZER; 471static pthread_mutex_t g_proxies_list_mutex = PTHREAD_MUTEX_INITIALIZER;
235 472
236int proxylist_add_network( const ot_net *proxy, const ot_net *net ) { 473int proxylist_add_network(const ot_net *proxy, const ot_net *net) {
237 ot_proxymap *map; 474 ot_proxymap *map;
238 int exactmatch, result = 1; 475 int exactmatch, result = 1;
239 pthread_mutex_lock(&g_proxies_list_mutex); 476 pthread_mutex_lock(&g_proxies_list_mutex);
240 477
241 /* If we have a direct hit, use and extend the vector there */ 478 /* If we have a direct hit, use and extend the vector there */
242 map = binary_search( proxy, g_proxies_list.data, g_proxies_list.size, sizeof(ot_proxymap), sizeof(ot_net), &exactmatch ); 479 map = binary_search(proxy, g_proxies_list.data, g_proxies_list.size, sizeof(ot_proxymap), sizeof(ot_net), &exactmatch);
243 480
244 if( !map || !exactmatch ) { 481 if (!map || !exactmatch) {
245 /* else see, if we've got overlapping networks 482 /* else see, if we've got overlapping networks
246 and get a new empty vector if not */ 483 and get a new empty vector if not */
247 ot_vector empty; 484 ot_vector empty;
248 memset( &empty, 0, sizeof( ot_vector ) ); 485 memset(&empty, 0, sizeof(ot_vector));
249 map = set_value_for_net( proxy, &g_proxies_list, &empty, sizeof(ot_proxymap)); 486 map = set_value_for_net(proxy, &g_proxies_list, &empty, sizeof(ot_proxymap));
250 } 487 }
251 488
252 if( map && set_value_for_net( net, &map->networks, NULL, sizeof(ot_net) ) ) 489 if (map && set_value_for_net(net, &map->networks, NULL, sizeof(ot_net)))
253 result = 1; 490 result = 1;
254 491
255 pthread_mutex_unlock(&g_proxies_list_mutex); 492 pthread_mutex_unlock(&g_proxies_list_mutex);
256 return result; 493 return result;
257} 494}
258 495
259int proxylist_check_proxy( const ot_ip6 proxy, const ot_ip6 address ) { 496int proxylist_check_proxy(const ot_ip6 proxy, const ot_ip6 address) {
260 int result = 0; 497 int result = 0;
261 ot_proxymap *map; 498 ot_proxymap *map;
262 499
263 pthread_mutex_lock(&g_proxies_list_mutex); 500 pthread_mutex_lock(&g_proxies_list_mutex);
264 501
265 if( ( map = get_value_for_net( proxy, &g_proxies_list, sizeof(ot_proxymap) ) ) ) 502 if ((map = get_value_for_net(proxy, &g_proxies_list, sizeof(ot_proxymap))))
266 if( !address || get_value_for_net( address, &map->networks, sizeof(ot_net) ) ) 503 if (!address || get_value_for_net(address, &map->networks, sizeof(ot_net)))
267 result = 1; 504 result = 1;
268 505
269 pthread_mutex_unlock(&g_proxies_list_mutex); 506 pthread_mutex_unlock(&g_proxies_list_mutex);
@@ -272,42 +509,53 @@ int proxylist_check_proxy( const ot_ip6 proxy, const ot_ip6 address ) {
272 509
273#endif 510#endif
274 511
275static ot_ip6 g_adminip_addresses[OT_ADMINIP_MAX]; 512static ot_net g_admin_nets[OT_ADMINIP_MAX];
276static ot_permissions g_adminip_permissions[OT_ADMINIP_MAX]; 513static ot_permissions g_admin_nets_permissions[OT_ADMINIP_MAX];
277static unsigned int g_adminip_count = 0; 514static unsigned int g_admin_nets_count = 0;
278 515
279int accesslist_blessip( ot_ip6 ip, ot_permissions permissions ) { 516int accesslist_bless_net(ot_net *net, ot_permissions permissions) {
280 if( g_adminip_count >= OT_ADMINIP_MAX ) 517 if (g_admin_nets_count >= OT_ADMINIP_MAX)
281 return -1; 518 return -1;
282 519
283 memcpy(g_adminip_addresses + g_adminip_count,ip,sizeof(ot_ip6)); 520 memcpy(g_admin_nets + g_admin_nets_count, net, sizeof(ot_net));
284 g_adminip_permissions[ g_adminip_count++ ] = permissions; 521 g_admin_nets_permissions[g_admin_nets_count++] = permissions;
285 522
286#ifdef _DEBUG 523#ifdef _DEBUG
287 { 524 {
288 char _debug[512]; 525 char _debug[512];
289 int off = snprintf( _debug, sizeof(_debug), "Blessing ip address " ); 526 int off = snprintf(_debug, sizeof(_debug), "Blessing ip net ");
290 off += fmt_ip6c(_debug+off, ip ); 527 off += fmt_ip6c(_debug + off, net->address);
291 528 if (net->bits < 128) {
292 if( permissions & OT_PERMISSION_MAY_STAT ) off += snprintf( _debug+off, 512-off, " may_fetch_stats" ); 529 _debug[off++] = '/';
293 if( permissions & OT_PERMISSION_MAY_LIVESYNC ) off += snprintf( _debug+off, 512-off, " may_sync_live" ); 530 if (ip6_isv4mapped(net->address))
294 if( permissions & OT_PERMISSION_MAY_FULLSCRAPE ) off += snprintf( _debug+off, 512-off, " may_fetch_fullscrapes" ); 531 off += fmt_long(_debug + off, net->bits - 96);
295 if( permissions & OT_PERMISSION_MAY_PROXY ) off += snprintf( _debug+off, 512-off, " may_proxy" ); 532 else
296 if( !permissions ) off += snprintf( _debug+off, sizeof(_debug)-off, " nothing\n" ); 533 off += fmt_long(_debug + off, net->bits);
534 }
535
536 if (permissions & OT_PERMISSION_MAY_STAT)
537 off += snprintf(_debug + off, 512 - off, " may_fetch_stats");
538 if (permissions & OT_PERMISSION_MAY_LIVESYNC)
539 off += snprintf(_debug + off, 512 - off, " may_sync_live");
540 if (permissions & OT_PERMISSION_MAY_FULLSCRAPE)
541 off += snprintf(_debug + off, 512 - off, " may_fetch_fullscrapes");
542 if (permissions & OT_PERMISSION_MAY_PROXY)
543 off += snprintf(_debug + off, 512 - off, " may_proxy");
544 if (!permissions)
545 off += snprintf(_debug + off, sizeof(_debug) - off, " nothing");
297 _debug[off++] = '.'; 546 _debug[off++] = '.';
298 write( 2, _debug, off ); 547 _debug[off++] = '\n';
548 (void)write(2, _debug, off);
299 } 549 }
300#endif 550#endif
301 551
302 return 0; 552 return 0;
303} 553}
304 554
305int accesslist_isblessed( ot_ip6 ip, ot_permissions permissions ) { 555int accesslist_is_blessed(ot_ip6 ip, ot_permissions permissions) {
306 unsigned int i; 556 unsigned int i;
307 for( i=0; i<g_adminip_count; ++i ) 557 for (i = 0; i < g_admin_nets_count; ++i)
308 if( !memcmp( g_adminip_addresses + i, ip, sizeof(ot_ip6)) && ( g_adminip_permissions[ i ] & permissions ) ) 558 if (address_in_net(ip, g_admin_nets + i) && (g_admin_nets_permissions[i] & permissions))
309 return 1; 559 return 1;
310 return 0; 560 return 0;
311} 561}
312
313const char *g_version_accesslist_c = "$Source$: $Revision$\n";
diff --git a/ot_accesslist.h b/ot_accesslist.h
index b38b91a..0a7488e 100644
--- a/ot_accesslist.h
+++ b/ot_accesslist.h
@@ -6,26 +6,37 @@
6#ifndef OT_ACCESSLIST_H__ 6#ifndef OT_ACCESSLIST_H__
7#define OT_ACCESSLIST_H__ 7#define OT_ACCESSLIST_H__
8 8
9#if defined ( WANT_ACCESSLIST_BLACK ) && defined (WANT_ACCESSLIST_WHITE ) 9#include "trackerlogic.h"
10# error WANT_ACCESSLIST_BLACK and WANT_ACCESSLIST_WHITE are exclusive. 10
11#if defined(WANT_ACCESSLIST_BLACK) && defined(WANT_ACCESSLIST_WHITE)
12#error WANT_ACCESSLIST_BLACK and WANT_ACCESSLIST_WHITE are exclusive.
11#endif 13#endif
12 14
13#if defined ( WANT_ACCESSLIST_BLACK ) || defined (WANT_ACCESSLIST_WHITE ) 15#if defined(WANT_ACCESSLIST_BLACK) || defined(WANT_ACCESSLIST_WHITE)
14#define WANT_ACCESSLIST 16#define WANT_ACCESSLIST
15void accesslist_init( ); 17void accesslist_init(void);
16void accesslist_deinit( ); 18void accesslist_deinit(void);
17int accesslist_hashisvalid( ot_hash hash ); 19int accesslist_hashisvalid(ot_hash hash);
20void accesslist_cleanup(void);
18 21
19extern char *g_accesslist_filename; 22extern char *g_accesslist_filename;
23#ifdef WANT_DYNAMIC_ACCESSLIST
24extern char *g_accesslist_pipe_add;
25extern char *g_accesslist_pipe_delete;
26#endif
20 27
21#else 28#else
22#define accesslist_init( accesslist_filename ) 29#ifdef WANT_DYNAMIC_ACCESSLIST
23#define accesslist_deinit( ) 30#error WANT_DYNAMIC_ACCESSLIST needs either WANT_ACCESSLIST_BLACK or WANT_ACCESSLIST_WHITE
24#define accesslist_hashisvalid( hash ) 1 31#endif
32
33#define accesslist_init(accesslist_filename)
34#define accesslist_deinit()
35#define accesslist_hashisvalid(hash) 1
25#endif 36#endif
26 37
27/* Test if an address is subset of an ot_net, return value is considered a bool */ 38/* Test if an address is subset of an ot_net, return value is considered a bool */
28int address_in_net( const ot_ip6 address, const ot_net *net ); 39int address_in_net(const ot_ip6 address, const ot_net *net);
29 40
30/* Store a value into a vector of struct { ot_net net, uint8_t[x] value } member; 41/* Store a value into a vector of struct { ot_net net, uint8_t[x] value } member;
31 returns NULL 42 returns NULL
@@ -36,18 +47,17 @@ int address_in_net( const ot_ip6 address, const ot_net *net );
36 returns pointer to new member in vector for success 47 returns pointer to new member in vector for success
37 member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping 48 member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping
38*/ 49*/
39void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value, const size_t member_size ); 50void *set_value_for_net(const ot_net *net, ot_vector *vector, const void *value, const size_t member_size);
40 51
41/* Takes a vector filled with struct { ot_net net, uint8_t[x] value } member; 52/* Takes a vector filled with struct { ot_net net, uint8_t[x] value } member;
42 Returns pointer to _member_ associated with the net, or NULL if not found 53 Returns pointer to _member_ associated with the net, or NULL if not found
43 member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping 54 member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping
44*/ 55*/
45void *get_value_for_net( const ot_ip6 address, const ot_vector *vector, const size_t member_size ); 56void *get_value_for_net(const ot_ip6 address, const ot_vector *vector, const size_t member_size);
46
47 57
48#ifdef WANT_IP_FROM_PROXY 58#ifdef WANT_IP_FROM_PROXY
49int proxylist_add_network( const ot_net *proxy, const ot_net *net ); 59int proxylist_add_network(const ot_net *proxy, const ot_net *net);
50int proxylist_check_network( const ot_ip6 *proxy, const ot_ip6 address /* can be NULL to only check proxy */ ); 60int proxylist_check_network(const ot_ip6 *proxy, const ot_ip6 address /* can be NULL to only check proxy */);
51#endif 61#endif
52 62
53#ifdef WANT_FULLLOG_NETWORKS 63#ifdef WANT_FULLLOG_NETWORKS
@@ -61,10 +71,10 @@ struct ot_log {
61}; 71};
62extern ot_log *g_logchain_first, *g_logchain_last; 72extern ot_log *g_logchain_first, *g_logchain_last;
63 73
64void loglist_add_network( const ot_net *net ); 74void loglist_add_network(const ot_net *net);
65void loglist_reset( ); 75void loglist_reset();
66int loglist_check_address( const ot_ip6 address ); 76int loglist_check_address(const ot_ip6 address);
67#endif 77#endif
68 78
69typedef enum { 79typedef enum {
70 OT_PERMISSION_MAY_FULLSCRAPE = 0x1, 80 OT_PERMISSION_MAY_FULLSCRAPE = 0x1,
@@ -73,7 +83,7 @@ typedef enum {
73 OT_PERMISSION_MAY_PROXY = 0x8 83 OT_PERMISSION_MAY_PROXY = 0x8
74} ot_permissions; 84} ot_permissions;
75 85
76int accesslist_blessip( ot_ip6 ip, ot_permissions permissions ); 86int accesslist_bless_net(ot_net *net, ot_permissions permissions);
77int accesslist_isblessed( ot_ip6 ip, ot_permissions permissions ); 87int accesslist_is_blessed(ot_ip6 ip, ot_permissions permissions);
78 88
79#endif 89#endif
diff --git a/ot_clean.c b/ot_clean.c
index 4c03416..291b847 100644
--- a/ot_clean.c
+++ b/ot_clean.c
@@ -5,89 +5,91 @@
5 5
6/* System */ 6/* System */
7#include <pthread.h> 7#include <pthread.h>
8#include <unistd.h>
9#include <string.h> 8#include <string.h>
9#include <unistd.h>
10 10
11/* Libowfat */ 11/* Libowfat */
12#include "io.h" 12#include "io.h"
13 13
14/* Opentracker */ 14/* Opentracker */
15#include "trackerlogic.h" 15#include "ot_accesslist.h"
16#include "ot_mutex.h"
17#include "ot_vector.h"
18#include "ot_clean.h" 16#include "ot_clean.h"
17#include "ot_mutex.h"
19#include "ot_stats.h" 18#include "ot_stats.h"
19#include "ot_vector.h"
20#include "trackerlogic.h"
20 21
21/* Returns amount of removed peers */ 22/* Returns amount of removed peers */
22static ssize_t clean_single_bucket( ot_peer *peers, size_t peer_count, time_t timedout, int *removed_seeders ) { 23static ssize_t clean_single_bucket(ot_peer *peers, size_t peer_count, size_t peer_size, time_t timedout, int *removed_seeders) {
23 ot_peer *last_peer = peers + peer_count, *insert_point; 24 ot_peer *last_peer = peers + peer_count * peer_size, *insert_point;
24 time_t timediff;
25 25
26 /* Two scan modes: unless there is one peer removed, just increase ot_peertime */ 26 /* Two scan modes: unless there is one peer removed, just increase ot_peertime */
27 while( peers < last_peer ) { 27 while (peers < last_peer) {
28 if( ( timediff = timedout + OT_PEERTIME( peers ) ) >= OT_PEER_TIMEOUT ) 28 time_t timediff = timedout + OT_PEERTIME(peers, peer_size);
29 if (timediff >= OT_PEER_TIMEOUT)
29 break; 30 break;
30 OT_PEERTIME( peers++ ) = timediff; 31 OT_PEERTIME(peers, peer_size) = timediff;
32 peers += peer_size;
33 }
34
35 /* If we at least remove one peer, we have to copy */
36 for (insert_point = peers; peers < last_peer; peers += peer_size) {
37 time_t timediff = timedout + OT_PEERTIME(peers, peer_size);
38
39 if (timediff < OT_PEER_TIMEOUT) {
40 OT_PEERTIME(peers, peer_size) = timediff;
41 memcpy(insert_point, peers, peer_size);
42 insert_point += peer_size;
43 } else if (OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING)
44 (*removed_seeders)++;
31 } 45 }
32 46
33 /* If we at least remove one peer, we have to copy */ 47 return (peers - insert_point) / peer_size;
34 insert_point = peers;
35 while( peers < last_peer )
36 if( ( timediff = timedout + OT_PEERTIME( peers ) ) < OT_PEER_TIMEOUT ) {
37 OT_PEERTIME( peers ) = timediff;
38 memcpy( insert_point++, peers++, sizeof(ot_peer));
39 } else
40 if( OT_PEERFLAG( peers++ ) & PEER_FLAG_SEEDING )
41 (*removed_seeders)++;
42
43 return peers - insert_point;
44} 48}
45 49
46/* Clean a single torrent 50int clean_single_peer_list(ot_peerlist *peer_list, size_t peer_size) {
47 return 1 if torrent timed out 51 ot_vector *peer_vector = &peer_list->peers;
48*/ 52 time_t timedout = (time_t)(g_now_minutes - peer_list->base);
49int clean_single_torrent( ot_torrent *torrent ) { 53 int num_buckets = 1, removed_seeders = 0;
50 ot_peerlist *peer_list = torrent->peer_list;
51 ot_vector *bucket_list = &peer_list->peers;
52 time_t timedout = (time_t)( g_now_minutes - peer_list->base );
53 int num_buckets = 1, removed_seeders = 0;
54 54
55 /* No need to clean empty torrent */ 55 /* No need to clean empty torrent */
56 if( !timedout ) 56 if (!timedout)
57 return 0; 57 return 0;
58 58
59 /* Torrent has idled out */ 59 /* Torrent has idled out */
60 if( timedout > OT_TORRENT_TIMEOUT ) 60 if (timedout > OT_TORRENT_TIMEOUT)
61 return 1; 61 return 1;
62 62
63 /* Nothing to be cleaned here? Test if torrent is worth keeping */ 63 /* Nothing to be cleaned here? Test if torrent is worth keeping */
64 if( timedout > OT_PEER_TIMEOUT ) { 64 if (timedout > OT_PEER_TIMEOUT) {
65 if( !peer_list->peer_count ) 65 if (!peer_list->peer_count)
66 return peer_list->down_count ? 0 : 1; 66 return peer_list->down_count ? 0 : 1;
67 timedout = OT_PEER_TIMEOUT; 67 timedout = OT_PEER_TIMEOUT;
68 } 68 }
69 69
70 if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { 70 if (OT_PEERLIST_HASBUCKETS(peer_list)) {
71 num_buckets = bucket_list->size; 71 num_buckets = peer_vector->size;
72 bucket_list = (ot_vector *)bucket_list->data; 72 peer_vector = (ot_vector *)peer_vector->data;
73 } 73 }
74 74
75 while( num_buckets-- ) { 75 while (num_buckets--) {
76 size_t removed_peers = clean_single_bucket( bucket_list->data, bucket_list->size, timedout, &removed_seeders ); 76 size_t removed_peers = clean_single_bucket(peer_vector->data, peer_vector->size, peer_size, timedout, &removed_seeders);
77 peer_list->peer_count -= removed_peers; 77 peer_list->peer_count -= removed_peers;
78 bucket_list->size -= removed_peers; 78 peer_vector->size -= removed_peers;
79 if( bucket_list->size < removed_peers ) 79 if (removed_peers)
80 vector_fixup_peers( bucket_list ); 80 vector_fixup_peers(peer_vector, peer_size);
81 ++bucket_list; 81
82 /* Skip to next bucket, a vector containing peers */
83 ++peer_vector;
82 } 84 }
83 85
84 peer_list->seed_count -= removed_seeders; 86 peer_list->seed_count -= removed_seeders;
85 87
86 /* See, if we need to convert a torrent from simple vector to bucket list */ 88 /* See if we need to convert a torrent from simple vector to bucket list */
87 if( ( peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT ) || OT_PEERLIST_HASBUCKETS(peer_list) ) 89 if ((peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT) || OT_PEERLIST_HASBUCKETS(peer_list))
88 vector_redistribute_buckets( peer_list ); 90 vector_redistribute_buckets(peer_list, peer_size);
89 91
90 if( peer_list->peer_count ) 92 if (peer_list->peer_count)
91 peer_list->base = g_now_minutes; 93 peer_list->base = g_now_minutes;
92 else { 94 else {
93 /* When we got here, the last time that torrent 95 /* When we got here, the last time that torrent
@@ -95,45 +97,48 @@ int clean_single_torrent( ot_torrent *torrent ) {
95 peer_list->base = g_now_minutes - OT_PEER_TIMEOUT; 97 peer_list->base = g_now_minutes - OT_PEER_TIMEOUT;
96 } 98 }
97 return 0; 99 return 0;
100}
98 101
102/* Clean a single torrent
103 return 1 if torrent timed out
104*/
105int clean_single_torrent(ot_torrent *torrent) {
106 return clean_single_peer_list(torrent->peer_list6, OT_PEER_SIZE6) * clean_single_peer_list(torrent->peer_list4, OT_PEER_SIZE4);
99} 107}
100 108
101/* Clean up all peers in current bucket, remove timedout pools and 109/* Clean up all peers in current bucket, remove timedout pools and
102 torrents */ 110 torrents */
103static void * clean_worker( void * args ) { 111static void *clean_worker(void *args) {
104 (void) args; 112 (void)args;
105 while( 1 ) { 113 while (1) {
106 int bucket = OT_BUCKET_COUNT; 114 int bucket = OT_BUCKET_COUNT;
107 while( bucket-- ) { 115 while (bucket--) {
108 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 116 ot_vector *torrents_list = mutex_bucket_lock(bucket);
109 size_t toffs; 117 size_t toffs;
110 int delta_torrentcount = 0; 118 int delta_torrentcount = 0;
111 119
112 for( toffs=0; toffs<torrents_list->size; ++toffs ) { 120 for (toffs = 0; toffs < torrents_list->size; ++toffs) {
113 ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + toffs; 121 ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + toffs;
114 if( clean_single_torrent( torrent ) ) { 122 if (clean_single_torrent(torrent)) {
115 vector_remove_torrent( torrents_list, torrent ); 123 vector_remove_torrent(torrents_list, torrent);
116 --delta_torrentcount; 124 --delta_torrentcount;
117 --toffs; 125 --toffs;
118 } 126 }
119 } 127 }
120 mutex_bucket_unlock( bucket, delta_torrentcount ); 128 mutex_bucket_unlock(bucket, delta_torrentcount);
121 if( !g_opentracker_running ) 129 if (!g_opentracker_running)
122 return NULL; 130 return NULL;
123 usleep( OT_CLEAN_SLEEP ); 131 usleep(OT_CLEAN_SLEEP);
124 } 132 }
125 stats_cleanup(); 133 stats_cleanup();
134#ifdef WANT_ACCESSLIST
135 accesslist_cleanup();
136#endif
126 } 137 }
127 return NULL; 138 return NULL;
128} 139}
129 140
130static pthread_t thread_id; 141static pthread_t thread_id;
131void clean_init( void ) { 142void clean_init(void) { pthread_create(&thread_id, NULL, clean_worker, NULL); }
132 pthread_create( &thread_id, NULL, clean_worker, NULL );
133}
134
135void clean_deinit( void ) {
136 pthread_cancel( thread_id );
137}
138 143
139const char *g_version_clean_c = "$Source$: $Revision$\n"; 144void clean_deinit(void) { pthread_cancel(thread_id); }
diff --git a/ot_clean.h b/ot_clean.h
index 956770a..e8bcdc1 100644
--- a/ot_clean.h
+++ b/ot_clean.h
@@ -7,13 +7,13 @@
7#define OT_CLEAN_H__ 7#define OT_CLEAN_H__
8 8
9/* The amount of time a clean cycle should take */ 9/* The amount of time a clean cycle should take */
10#define OT_CLEAN_INTERVAL_MINUTES 2 10#define OT_CLEAN_INTERVAL_MINUTES 2
11 11
12/* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */ 12/* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */
13#define OT_CLEAN_SLEEP ( ( ( OT_CLEAN_INTERVAL_MINUTES ) * 60 * 1000000 ) / ( OT_BUCKET_COUNT ) ) 13#define OT_CLEAN_SLEEP (((OT_CLEAN_INTERVAL_MINUTES) * 60 * 1000000) / (OT_BUCKET_COUNT))
14 14
15void clean_init( void ); 15void clean_init(void);
16void clean_deinit( void ); 16void clean_deinit(void);
17int clean_single_torrent( ot_torrent *torrent ); 17int clean_single_torrent(ot_torrent *torrent);
18 18
19#endif 19#endif
diff --git a/ot_fullscrape.c b/ot_fullscrape.c
index faea4b9..6fd6d1c 100644
--- a/ot_fullscrape.c
+++ b/ot_fullscrape.c
@@ -6,14 +6,18 @@
6#ifdef WANT_FULLSCRAPE 6#ifdef WANT_FULLSCRAPE
7 7
8/* System */ 8/* System */
9#include <sys/param.h> 9#include <arpa/inet.h>
10#include <pthread.h>
10#include <stdio.h> 11#include <stdio.h>
11#include <string.h> 12#include <string.h>
12#include <pthread.h> 13#include <sys/param.h>
13#include <arpa/inet.h>
14#ifdef WANT_COMPRESSION_GZIP 14#ifdef WANT_COMPRESSION_GZIP
15#include <zlib.h> 15#include <zlib.h>
16#endif 16#endif
17#ifdef WANT_COMPRESSION_ZSTD
18#include <zstd.h>
19#endif
20
17 21
18/* Libowfat */ 22/* Libowfat */
19#include "byte.h" 23#include "byte.h"
@@ -21,52 +25,64 @@
21#include "textcode.h" 25#include "textcode.h"
22 26
23/* Opentracker */ 27/* Opentracker */
24#include "trackerlogic.h"
25#include "ot_mutex.h"
26#include "ot_iovec.h"
27#include "ot_fullscrape.h" 28#include "ot_fullscrape.h"
29#include "ot_iovec.h"
30#include "ot_mutex.h"
31#include "trackerlogic.h"
28 32
29/* Fetch full scrape info for all torrents 33/* Fetch full scrape info for all torrents
30 Full scrapes usually are huge and one does not want to 34 Full scrapes usually are huge and one does not want to
31 allocate more memory. So lets get them in 512k units 35 allocate more memory. So lets get them in 512k units
32*/ 36*/
33#define OT_SCRAPE_CHUNK_SIZE (512*1024) 37#define OT_SCRAPE_CHUNK_SIZE (1024 * 1024)
34 38
35/* "d8:completei%zde10:downloadedi%zde10:incompletei%zdee" */ 39/* "d8:completei%zde10:downloadedi%zde10:incompletei%zdee" */
36#define OT_SCRAPE_MAXENTRYLEN 256 40#define OT_SCRAPE_MAXENTRYLEN 256
37 41
42/* Forward declaration */
43static void fullscrape_make(int taskid, ot_tasktype mode);
38#ifdef WANT_COMPRESSION_GZIP 44#ifdef WANT_COMPRESSION_GZIP
39#define IF_COMPRESSION( TASK ) if( mode & TASK_FLAG_GZIP ) TASK 45static void fullscrape_make_gzip(int taskid, ot_tasktype mode);
40#define WANT_COMPRESSION_GZIP_PARAM( param1, param2, param3 ) , param1, param2, param3 46#endif
41#else 47#ifdef WANT_COMPRESSION_ZSTD
42#define IF_COMPRESSION( TASK ) 48static void fullscrape_make_zstd(int taskid, ot_tasktype mode);
43#define WANT_COMPRESSION_GZIP_PARAM( param1, param2, param3 )
44#endif 49#endif
45
46/* Forward declaration */
47static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode );
48 50
49/* Converter function from memory to human readable hex strings 51/* Converter function from memory to human readable hex strings
50 XXX - Duplicated from ot_stats. Needs fix. */ 52 XXX - Duplicated from ot_stats. Needs fix. */
51static char*to_hex(char*d,uint8_t*s){char*m="0123456789ABCDEF";char *t=d;char*e=d+40;while(d<e){*d++=m[*s>>4];*d++=m[*s++&15];}*d=0;return t;} 53static char *to_hex(char *d, uint8_t *s) {
54 char *m = "0123456789ABCDEF";
55 char *t = d;
56 char *e = d + 40;
57 while (d < e) {
58 *d++ = m[*s >> 4];
59 *d++ = m[*s++ & 15];
60 }
61 *d = 0;
62 return t;
63}
52 64
53/* This is the entry point into this worker thread 65/* This is the entry point into this worker thread
54 It grabs tasks from mutex_tasklist and delivers results back 66 It grabs tasks from mutex_tasklist and delivers results back
55*/ 67*/
56static void * fullscrape_worker( void * args ) { 68static void *fullscrape_worker(void *args) {
57 int iovec_entries; 69 (void)args;
58 struct iovec *iovector;
59
60 (void) args;
61 70
62 while( 1 ) { 71 while (g_opentracker_running) {
63 ot_tasktype tasktype = TASK_FULLSCRAPE; 72 ot_tasktype tasktype = TASK_FULLSCRAPE;
64 ot_taskid taskid = mutex_workqueue_poptask( &tasktype ); 73 ot_taskid taskid = mutex_workqueue_poptask(&tasktype);
65 fullscrape_make( &iovec_entries, &iovector, tasktype ); 74#ifdef WANT_COMPRESSION_ZSTD
66 if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) ) 75 if (tasktype & TASK_FLAG_ZSTD)
67 iovec_free( &iovec_entries, &iovector ); 76 fullscrape_make_zstd(taskid, tasktype);
68 if( !g_opentracker_running ) 77 else
69 return NULL; 78#endif
79#ifdef WANT_COMPRESSION_GZIP
80 if (tasktype & TASK_FLAG_GZIP)
81 fullscrape_make_gzip(taskid, tasktype);
82 else
83#endif
84 fullscrape_make(taskid, tasktype);
85 mutex_workqueue_pushchunked(taskid, NULL);
70 } 86 }
71 return NULL; 87 return NULL;
72} 88}
@@ -84,166 +100,358 @@ void fullscrape_deliver( int64 sock, ot_tasktype tasktype ) {
84 mutex_workqueue_pushtask( sock, tasktype ); 100 mutex_workqueue_pushtask( sock, tasktype );
85} 101}
86 102
87static int fullscrape_increase( int *iovec_entries, struct iovec **iovector, 103static char * fullscrape_write_one( ot_tasktype mode, char *r, ot_torrent *torrent, ot_hash *hash ) {
88 char **r, char **re WANT_COMPRESSION_GZIP_PARAM( z_stream *strm, ot_tasktype mode, int zaction ) ) { 104 size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
89 /* Allocate a fresh output buffer at the end of our buffers list */ 105 size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
90 if( !( *r = iovec_fix_increase_or_free( iovec_entries, iovector, *r, OT_SCRAPE_CHUNK_SIZE ) ) ) { 106 size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count;
107
108 switch (mode & TASK_TASK_MASK) {
109 case TASK_FULLSCRAPE:
110 default:
111 /* push hash as bencoded string */
112 *r++ = '2';
113 *r++ = '0';
114 *r++ = ':';
115 memcpy(r, hash, sizeof(ot_hash));
116 r += sizeof(ot_hash);
117 /* push rest of the scrape string */
118 r += sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", seed_count, down_count, peer_count - seed_count);
119
120 break;
121 case TASK_FULLSCRAPE_TPB_ASCII:
122 to_hex(r, *hash);
123 r += 2 * sizeof(ot_hash);
124 r += sprintf(r, ":%zd:%zd\n", seed_count, peer_count - seed_count);
125 break;
126 case TASK_FULLSCRAPE_TPB_ASCII_PLUS:
127 to_hex(r, *hash);
128 r += 2 * sizeof(ot_hash);
129 r += sprintf(r, ":%zd:%zd:%zd\n", seed_count, peer_count - seed_count, down_count);
130 break;
131 case TASK_FULLSCRAPE_TPB_BINARY:
132 memcpy(r, *hash, sizeof(ot_hash));
133 r += sizeof(ot_hash);
134 *(uint32_t *)(r + 0) = htonl((uint32_t)seed_count);
135 *(uint32_t *)(r + 4) = htonl((uint32_t)(peer_count - seed_count));
136 r += 8;
137 break;
138 case TASK_FULLSCRAPE_TPB_URLENCODED:
139 r += fmt_urlencoded(r, (char *)*hash, 20);
140 r += sprintf(r, ":%zd:%zd\n", seed_count, peer_count - seed_count);
141 break;
142 case TASK_FULLSCRAPE_TRACKERSTATE:
143 to_hex(r, *hash);
144 r += 2 * sizeof(ot_hash);
145 r += sprintf(r, ":%zd:%zd\n", torrent->peer_list6->base, down_count);
146 break;
147 }
148 return r;
149}
91 150
92 /* Deallocate gzip buffers */ 151static void fullscrape_make(int taskid, ot_tasktype mode) {
93 IF_COMPRESSION( deflateEnd(strm); ) 152 int bucket;
153 char *r, *re;
154 struct iovec iovector = {NULL, 0};
94 155
95 /* Release lock on current bucket and return */ 156 /* Setup return vector... */
96 return -1; 157 r = iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
97 } 158 if (!r)
159 return;
98 160
99 /* Adjust new end of output buffer */ 161 /* re points to low watermark */
100 *re = *r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN; 162 re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN;
101 163
102 /* When compressing, we have all the bytes in output buffer */ 164 if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE)
103#ifdef WANT_COMPRESSION_GZIP 165 r += sprintf(r, "d5:filesd");
104 if( mode & TASK_FLAG_GZIP ) { 166
105 int zres; 167 /* For each bucket... */
106 *re -= OT_SCRAPE_MAXENTRYLEN; 168 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
107 strm->next_out = (uint8_t*)*r; 169 /* Get exclusive access to that bucket */
108 strm->avail_out = OT_SCRAPE_CHUNK_SIZE; 170 ot_vector *torrents_list = mutex_bucket_lock(bucket);
109 zres = deflate( strm, zaction ); 171 ot_torrent *torrents = (ot_torrent *)(torrents_list->data);
110 if( ( zres < Z_OK ) && ( zres != Z_BUF_ERROR ) ) 172 size_t i;
111 fprintf( stderr, "deflate() failed while in fullscrape_increase(%d).\n", zaction ); 173
112 *r = (char*)strm->next_out; 174 /* For each torrent in this bucket.. */
175 for (i = 0; i < torrents_list->size; ++i) {
176 r = fullscrape_write_one(mode, r, torrents + i, &torrents[i].hash);
177
178 if (r > re) {
179 iovector.iov_len = r - (char *)iovector.iov_base;
180
181 if (mutex_workqueue_pushchunked(taskid, &iovector)) {
182 free(iovector.iov_base);
183 return mutex_bucket_unlock(bucket, 0);
184 }
185 /* Allocate a fresh output buffer */
186 r = iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
187 if (!r)
188 return mutex_bucket_unlock(bucket, 0);
189
190 /* re points to low watermark */
191 re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN;
192 }
193 }
194
195 /* All torrents done: release lock on current bucket */
196 mutex_bucket_unlock(bucket, 0);
197
198 /* Parent thread died? */
199 if (!g_opentracker_running)
200 return;
113 } 201 }
114#endif
115 202
116 return 0; 203 if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE)
204 r += sprintf(r, "ee");
205
206 /* Send rest of data */
207 iovector.iov_len = r - (char *)iovector.iov_base;
208 if (mutex_workqueue_pushchunked(taskid, &iovector))
209 free(iovector.iov_base);
117} 210}
118 211
119static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ) {
120 int bucket;
121 char *r, *re;
122#ifdef WANT_COMPRESSION_GZIP 212#ifdef WANT_COMPRESSION_GZIP
123 char compress_buffer[OT_SCRAPE_MAXENTRYLEN];
124 z_stream strm;
125#endif
126 213
214static void fullscrape_make_gzip(int taskid, ot_tasktype mode) {
215 int bucket;
216 char *r;
217 struct iovec iovector = {NULL, 0};
218 int zres;
219 z_stream strm;
127 /* Setup return vector... */ 220 /* Setup return vector... */
128 *iovec_entries = 0; 221 iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
129 *iovector = NULL; 222 if (!iovector.iov_base)
130 if( !( r = iovec_increase( iovec_entries, iovector, OT_SCRAPE_CHUNK_SIZE ) ) )
131 return; 223 return;
132 224
133 /* re points to low watermark */ 225 byte_zero(&strm, sizeof(strm));
134 re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN; 226 strm.next_out = (uint8_t *)iovector.iov_base;
227 strm.avail_out = OT_SCRAPE_CHUNK_SIZE;
228 if (deflateInit2(&strm, 7, Z_DEFLATED, 31, 9, Z_DEFAULT_STRATEGY) != Z_OK)
229 fprintf(stderr, "not ok.\n");
135 230
136#ifdef WANT_COMPRESSION_GZIP 231 if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) {
137 if( mode & TASK_FLAG_GZIP ) { 232 strm.next_in = (uint8_t *)"d5:filesd";
138 re += OT_SCRAPE_MAXENTRYLEN; 233 strm.avail_in = strlen("d5:filesd");
139 byte_zero( &strm, sizeof(strm) ); 234 zres = deflate(&strm, Z_NO_FLUSH);
140 strm.next_in = (uint8_t*)compress_buffer;
141 strm.next_out = (uint8_t*)r;
142 strm.avail_out = OT_SCRAPE_CHUNK_SIZE;
143 if( deflateInit2(&strm,7,Z_DEFLATED,31,8,Z_DEFAULT_STRATEGY) != Z_OK )
144 fprintf( stderr, "not ok.\n" );
145 r = compress_buffer;
146 } 235 }
147#endif
148
149 if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE )
150 r += sprintf( r, "d5:filesd" );
151 236
152 /* For each bucket... */ 237 /* For each bucket... */
153 for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 238 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
154 /* Get exclusive access to that bucket */ 239 /* Get exclusive access to that bucket */
155 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 240 ot_vector *torrents_list = mutex_bucket_lock(bucket);
156 size_t tor_offset; 241 ot_torrent *torrents = (ot_torrent *)(torrents_list->data);
242 size_t i;
157 243
158 /* For each torrent in this bucket.. */ 244 /* For each torrent in this bucket.. */
159 for( tor_offset=0; tor_offset<torrents_list->size; ++tor_offset ) { 245 for (i = 0; i < torrents_list->size; ++i) {
160 /* Address torrents members */ 246 char compress_buffer[OT_SCRAPE_MAXENTRYLEN];
161 ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[tor_offset] ).peer_list; 247 r = fullscrape_write_one(mode, compress_buffer, torrents + i, &torrents[i].hash);
162 ot_hash *hash =&( ((ot_torrent*)(torrents_list->data))[tor_offset] ).hash; 248 strm.next_in = (uint8_t *)compress_buffer;
163 249 strm.avail_in = r - compress_buffer;
164 switch( mode & TASK_TASK_MASK ) { 250 zres = deflate(&strm, Z_NO_FLUSH);
165 case TASK_FULLSCRAPE: 251 if ((zres < Z_OK) && (zres != Z_BUF_ERROR))
166 default: 252 fprintf(stderr, "deflate() failed while in fullscrape_make().\n");
167 /* push hash as bencoded string */ 253
168 *r++='2'; *r++='0'; *r++=':'; 254 /* Check if there still is enough buffer left */
169 memcpy( r, hash, sizeof(ot_hash) ); r += sizeof(ot_hash); 255 while (!strm.avail_out) {
170 /* push rest of the scrape string */ 256 iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base;
171 r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", peer_list->seed_count, peer_list->down_count, peer_list->peer_count-peer_list->seed_count ); 257
172 258 if (mutex_workqueue_pushchunked(taskid, &iovector)) {
173 break; 259 free(iovector.iov_base);
174 case TASK_FULLSCRAPE_TPB_ASCII: 260 return mutex_bucket_unlock(bucket, 0);
175 to_hex( r, *hash ); r+= 2 * sizeof(ot_hash); 261 }
176 r += sprintf( r, ":%zd:%zd\n", peer_list->seed_count, peer_list->peer_count-peer_list->seed_count ); 262 /* Allocate a fresh output buffer */
177 break; 263 iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
178 case TASK_FULLSCRAPE_TPB_ASCII_PLUS: 264 if (!iovector.iov_base) {
179 to_hex( r, *hash ); r+= 2 * sizeof(ot_hash); 265 fprintf(stderr, "Out of memory trying to claim ouput buffer\n");
180 r += sprintf( r, ":%zd:%zd:%zd\n", peer_list->seed_count, peer_list->peer_count-peer_list->seed_count, peer_list->down_count ); 266 deflateEnd(&strm);
181 break; 267 return mutex_bucket_unlock(bucket, 0);
182 case TASK_FULLSCRAPE_TPB_BINARY: 268 }
183 memcpy( r, *hash, sizeof(ot_hash) ); r += sizeof(ot_hash); 269 strm.next_out = (uint8_t *)iovector.iov_base;
184 *(uint32_t*)(r+0) = htonl( (uint32_t) peer_list->seed_count ); 270 strm.avail_out = OT_SCRAPE_CHUNK_SIZE;
185 *(uint32_t*)(r+4) = htonl( (uint32_t)( peer_list->peer_count-peer_list->seed_count) ); 271 zres = deflate(&strm, Z_NO_FLUSH);
186 r+=8; 272 if ((zres < Z_OK) && (zres != Z_BUF_ERROR))
187 break; 273 fprintf(stderr, "deflate() failed while in fullscrape_make().\n");
188 case TASK_FULLSCRAPE_TPB_URLENCODED:
189 r += fmt_urlencoded( r, (char *)*hash, 20 );
190 r += sprintf( r, ":%zd:%zd\n", peer_list->seed_count, peer_list->peer_count-peer_list->seed_count );
191 break;
192 case TASK_FULLSCRAPE_TRACKERSTATE:
193 to_hex( r, *hash ); r+= 2 * sizeof(ot_hash);
194 r += sprintf( r, ":%zd:%zd\n", peer_list->base, peer_list->down_count );
195 break;
196 } 274 }
275 }
197 276
198#ifdef WANT_COMPRESSION_GZIP 277 /* All torrents done: release lock on current bucket */
199 if( mode & TASK_FLAG_GZIP ) { 278 mutex_bucket_unlock(bucket, 0);
200 int zres; 279
201 strm.next_in = (uint8_t*)compress_buffer; 280 /* Parent thread died? */
202 strm.avail_in = r - compress_buffer; 281 if (!g_opentracker_running) {
203 zres = deflate( &strm, Z_NO_FLUSH ); 282 deflateEnd(&strm);
204 if( ( zres < Z_OK ) && ( zres != Z_BUF_ERROR ) ) 283 return;
205 fprintf( stderr, "deflate() failed while in fullscrape_make().\n" ); 284 }
206 r = (char*)strm.next_out; 285 }
286
287 if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) {
288 strm.next_in = (uint8_t *)"ee";
289 strm.avail_in = strlen("ee");
290 }
291
292 if (deflate(&strm, Z_FINISH) < Z_OK)
293 fprintf(stderr, "deflate() failed while in fullscrape_make()'s endgame.\n");
294
295 iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base;
296 if (mutex_workqueue_pushchunked(taskid, &iovector)) {
297 free(iovector.iov_base);
298 deflateEnd(&strm);
299 return;
300 }
301
302 /* Check if there's a last batch of data in the zlib buffer */
303 if (!strm.avail_out) {
304 /* Allocate a fresh output buffer */
305 iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
306
307 if (!iovector.iov_base) {
308 fprintf(stderr, "Problem with iovec_fix_increase_or_free\n");
309 deflateEnd(&strm);
310 return;
207 } 311 }
312 strm.next_out = iovector.iov_base;
313 strm.avail_out = OT_SCRAPE_CHUNK_SIZE;
314 if (deflate(&strm, Z_FINISH) < Z_OK)
315 fprintf(stderr, "deflate() failed while in fullscrape_make()'s endgame.\n");
316
317 /* Only pass the new buffer if there actually was some data left in the buffer */
318 iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base;
319 if (!iovector.iov_len || mutex_workqueue_pushchunked(taskid, &iovector))
320 free(iovector.iov_base);
321 }
322
323 deflateEnd(&strm);
324}
325/* WANT_COMPRESSION_GZIP */
208#endif 326#endif
209 327
210 /* Check if there still is enough buffer left */ 328#ifdef WANT_COMPRESSION_ZSTD
211 while( r >= re )
212 if( fullscrape_increase( iovec_entries, iovector, &r, &re WANT_COMPRESSION_GZIP_PARAM( &strm, mode, Z_NO_FLUSH ) ) )
213 return mutex_bucket_unlock( bucket, 0 );
214 329
215 IF_COMPRESSION( r = compress_buffer; ) 330static void fullscrape_make_zstd(int taskid, ot_tasktype mode) {
331 int bucket;
332 char *r;
333 struct iovec iovector = {NULL, 0};
334 ZSTD_CCtx *zstream = ZSTD_createCCtx();
335 ZSTD_inBuffer inbuf;
336 ZSTD_outBuffer outbuf;
337 size_t more_bytes;
338
339 if (!zstream)
340 return;
341
342 /* Setup return vector... */
343 iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
344 if (!iovector.iov_base) {
345 ZSTD_freeCCtx(zstream);
346 return;
347 }
348
349 /* Working with a compression level 6 is half as fast as level 3, but
350 seems to be the last reasonable bump that's worth extra cpu */
351 ZSTD_CCtx_setParameter(zstream, ZSTD_c_compressionLevel, 6);
352
353 outbuf.dst = iovector.iov_base;
354 outbuf.size = OT_SCRAPE_CHUNK_SIZE;
355 outbuf.pos = 0;
356
357 if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) {
358 inbuf.src = (const void *)"d5:filesd";
359 inbuf.size = strlen("d5:filesd");
360 inbuf.pos = 0;
361 ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_continue);
362 }
363
364 /* For each bucket... */
365 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
366 /* Get exclusive access to that bucket */
367 ot_vector *torrents_list = mutex_bucket_lock(bucket);
368 ot_torrent *torrents = (ot_torrent *)(torrents_list->data);
369 size_t i;
370
371 /* For each torrent in this bucket.. */
372 for (i = 0; i < torrents_list->size; ++i) {
373 char compress_buffer[OT_SCRAPE_MAXENTRYLEN];
374 r = fullscrape_write_one(mode, compress_buffer, torrents + i, &torrents[i].hash);
375 inbuf.src = compress_buffer;
376 inbuf.size = r - compress_buffer;
377 inbuf.pos = 0;
378 ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_continue);
379
380 /* Check if there still is enough buffer left */
381 while (outbuf.pos + OT_SCRAPE_MAXENTRYLEN > outbuf.size) {
382 iovector.iov_len = outbuf.size;
383
384 if (mutex_workqueue_pushchunked(taskid, &iovector)) {
385 free(iovector.iov_base);
386 ZSTD_freeCCtx(zstream);
387 return mutex_bucket_unlock(bucket, 0);
388 }
389 /* Allocate a fresh output buffer */
390 iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
391 if (!iovector.iov_base) {
392 fprintf(stderr, "Out of memory trying to claim ouput buffer\n");
393 ZSTD_freeCCtx(zstream);
394 return mutex_bucket_unlock(bucket, 0);
395 }
396
397 outbuf.dst = iovector.iov_base;
398 outbuf.size = OT_SCRAPE_CHUNK_SIZE;
399 outbuf.pos = 0;
400
401 ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_continue);
402 }
216 } 403 }
217 404
218 /* All torrents done: release lock on current bucket */ 405 /* All torrents done: release lock on current bucket */
219 mutex_bucket_unlock( bucket, 0 ); 406 mutex_bucket_unlock(bucket, 0);
220 407
221 /* Parent thread died? */ 408 /* Parent thread died? */
222 if( !g_opentracker_running ) 409 if (!g_opentracker_running)
223 return; 410 return;
224 } 411 }
225 412
226 if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) 413 if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) {
227 r += sprintf( r, "ee" ); 414 inbuf.src = (const void *)"ee";
415 inbuf.size = strlen("ee");
416 inbuf.pos = 0;
417 }
418
419 more_bytes = ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_end);
228 420
229#ifdef WANT_COMPRESSION_GZIP 421 iovector.iov_len = outbuf.pos;
230 if( mode & TASK_FLAG_GZIP ) { 422 if (mutex_workqueue_pushchunked(taskid, &iovector)) {
231 strm.next_in = (uint8_t*)compress_buffer; 423 free(iovector.iov_base);
232 strm.avail_in = r - compress_buffer; 424 ZSTD_freeCCtx(zstream);
233 if( deflate( &strm, Z_FINISH ) < Z_OK ) 425 return;
234 fprintf( stderr, "deflate() failed while in fullscrape_make()'s endgame.\n" );
235 r = (char*)strm.next_out;
236
237 while( r >= re )
238 if( fullscrape_increase( iovec_entries, iovector, &r, &re WANT_COMPRESSION_GZIP_PARAM( &strm, mode, Z_FINISH ) ) )
239 return mutex_bucket_unlock( bucket, 0 );
240 deflateEnd(&strm);
241 } 426 }
242#endif
243 427
244 /* Release unused memory in current output buffer */ 428 /* Check if there's a last batch of data in the zlib buffer */
245 iovec_fixlast( iovec_entries, iovector, r ); 429 if (more_bytes) {
430 /* Allocate a fresh output buffer */
431 iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
432
433 if (!iovector.iov_base) {
434 fprintf(stderr, "Problem with iovec_fix_increase_or_free\n");
435 ZSTD_freeCCtx(zstream);
436 return;
437 }
438
439 outbuf.dst = iovector.iov_base;
440 outbuf.size = OT_SCRAPE_CHUNK_SIZE;
441 outbuf.pos = 0;
442
443 ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_end);
444
445 /* Only pass the new buffer if there actually was some data left in the buffer */
446 iovector.iov_len = outbuf.pos;
447 if (!iovector.iov_len || mutex_workqueue_pushchunked(taskid, &iovector))
448 free(iovector.iov_base);
449 }
450
451 ZSTD_freeCCtx(zstream);
246} 452}
453/* WANT_COMPRESSION_ZSTD */
247#endif 454#endif
248 455
249const char *g_version_fullscrape_c = "$Source$: $Revision$\n"; 456/* WANT_FULLSCRAPE */
457#endif
diff --git a/ot_fullscrape.h b/ot_fullscrape.h
index 0f920ec..bbb2a3f 100644
--- a/ot_fullscrape.h
+++ b/ot_fullscrape.h
@@ -8,9 +8,11 @@
8 8
9#ifdef WANT_FULLSCRAPE 9#ifdef WANT_FULLSCRAPE
10 10
11void fullscrape_init( ); 11#include "ot_mutex.h"
12void fullscrape_deinit( ); 12
13void fullscrape_deliver( int64 sock, ot_tasktype tasktype ); 13void fullscrape_init();
14void fullscrape_deinit();
15void fullscrape_deliver(int64 sock, ot_tasktype tasktype);
14 16
15#else 17#else
16 18
diff --git a/ot_http.c b/ot_http.c
index e05db72..af3f210 100644
--- a/ot_http.c
+++ b/ot_http.c
@@ -4,506 +4,645 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <sys/types.h>
8#include <arpa/inet.h> 7#include <arpa/inet.h>
9#include <stdlib.h> 8#include <pthread.h>
9#define _GNU_SOURCE
10#include <stdio.h> 10#include <stdio.h>
11#include <stdlib.h>
11#include <string.h> 12#include <string.h>
13#include <sys/types.h>
12#include <unistd.h> 14#include <unistd.h>
13#include <pthread.h>
14 15
15/* Libowfat */ 16/* Libowfat */
16#include "byte.h"
17#include "array.h" 17#include "array.h"
18#include "byte.h"
19#include "case.h"
18#include "iob.h" 20#include "iob.h"
19#include "ip6.h" 21#include "ip6.h"
20#include "scan.h" 22#include "scan.h"
21#include "case.h"
22 23
23/* Opentracker */ 24/* Opentracker */
24#include "trackerlogic.h" 25#include "ot_accesslist.h"
25#include "ot_mutex.h" 26#include "ot_fullscrape.h"
26#include "ot_http.h" 27#include "ot_http.h"
27#include "ot_iovec.h" 28#include "ot_iovec.h"
28#include "scan_urlencoded_query.h" 29#include "ot_mutex.h"
29#include "ot_fullscrape.h"
30#include "ot_stats.h" 30#include "ot_stats.h"
31#include "ot_accesslist.h" 31#include "scan_urlencoded_query.h"
32#include "trackerlogic.h"
33
34#ifdef WANT_NO_AUTO_FREE
35#define OT_IOB_INIT(B) bzero(B, sizeof(io_batch))
36#else
37#define OT_IOB_INIT(B) iob_init_autofree(B, 0)
38#endif
32 39
33#define OT_MAXMULTISCRAPE_COUNT 64 40#define OT_MAXMULTISCRAPE_COUNT 64
41#define OT_BATCH_LIMIT (1024 * 1024 * 16)
34extern char *g_redirecturl; 42extern char *g_redirecturl;
35 43
36char *g_stats_path; 44char *g_stats_path;
37ssize_t g_stats_path_len; 45ssize_t g_stats_path_len;
38 46
39enum { 47enum { SUCCESS_HTTP_HEADER_LENGTH = 80, SUCCESS_HTTP_SIZE_OFF = 17 };
40 SUCCESS_HTTP_HEADER_LENGTH = 80,
41 SUCCESS_HTTP_HEADER_LENGTH_CONTENT_ENCODING = 32,
42 SUCCESS_HTTP_SIZE_OFF = 17 };
43 48
44static void http_senddata( const int64 sock, struct ot_workstruct *ws ) { 49static void http_senddata(const int64 sock, struct ot_workstruct *ws) {
45 struct http_data *cookie = io_getcookie( sock ); 50 struct http_data *cookie = io_getcookie(sock);
46 ssize_t written_size; 51 ssize_t written_size;
47 52
48 if( !cookie ) { io_close(sock); return; } 53 if (!cookie) {
54 io_close(sock);
55 return;
56 }
49 57
50 /* whoever sends data is not interested in its input-array */ 58 /* whoever sends data is not interested in its input-array */
51 if( ws->keep_alive && ws->header_size != ws->request_size ) { 59 if (ws->keep_alive && ws->header_size != ws->request_size) {
52 size_t rest = ws->request_size - ws->header_size; 60 size_t rest = ws->request_size - ws->header_size;
53 if( array_start(&cookie->request) ) { 61 if (array_start(&cookie->request)) {
54 memmove( array_start(&cookie->request), ws->request + ws->header_size, rest ); 62 memmove(array_start(&cookie->request), ws->request + ws->header_size, rest);
55 array_truncate( &cookie->request, 1, rest ); 63 array_truncate(&cookie->request, 1, rest);
56 } else 64 } else
57 array_catb(&cookie->request, ws->request + ws->header_size, rest ); 65 array_catb(&cookie->request, ws->request + ws->header_size, rest);
58 } else 66 } else
59 array_reset( &cookie->request ); 67 array_reset(&cookie->request);
60 68
61 written_size = write( sock, ws->reply, ws->reply_size ); 69 written_size = write(sock, ws->reply, ws->reply_size);
62 if( ( written_size < 0 ) || ( ( written_size == ws->reply_size ) && !ws->keep_alive ) ) { 70 if ((written_size < 0) || ((written_size == ws->reply_size) && !ws->keep_alive)) {
63 array_reset( &cookie->request ); 71 array_reset(&cookie->request);
64 free( cookie ); io_close( sock ); return; 72 free(cookie);
73 io_close(sock);
74 return;
65 } 75 }
66 76
67 if( written_size < ws->reply_size ) { 77 if (written_size < ws->reply_size) {
68 char * outbuf; 78 char *outbuf;
69 tai6464 t; 79 tai6464 t;
70 80
71 if( !( outbuf = malloc( ws->reply_size - written_size ) ) ) { 81 if (!(outbuf = malloc(ws->reply_size - written_size))) {
72 array_reset( &cookie->request ); 82 array_reset(&cookie->request);
73 free(cookie); io_close( sock ); 83 free(cookie);
84 io_close(sock);
74 return; 85 return;
75 } 86 }
76 87
77 memcpy( outbuf, ws->reply + written_size, ws->reply_size - written_size ); 88 memcpy(outbuf, ws->reply + written_size, ws->reply_size - written_size);
78 iob_addbuf_free( &cookie->batch, outbuf, ws->reply_size - written_size ); 89 if (!cookie->batch) {
90 cookie->batch = malloc(sizeof(io_batch));
91 OT_IOB_INIT(cookie->batch);
92 cookie->batches = 1;
93 }
94
95 iob_addbuf_free(cookie->batch, outbuf, ws->reply_size - written_size);
79 96
80 /* writeable short data sockets just have a tcp timeout */ 97 /* writeable short data sockets just have a tcp timeout */
81 if( !ws->keep_alive ) { 98 if (!ws->keep_alive) {
82 taia_uint( &t, 0 ); io_timeout( sock, t ); 99 taia_uint(&t, 0);
83 io_dontwantread( sock ); 100 io_timeout(sock, t);
101 io_dontwantread(sock);
84 } 102 }
85 io_wantwrite( sock ); 103 io_wantwrite(sock);
86 } 104 }
87} 105}
88 106
89#define HTTPERROR_302 return http_issue_error( sock, ws, CODE_HTTPERROR_302 ) 107#define HTTPERROR_302 return http_issue_error(sock, ws, CODE_HTTPERROR_302)
90#define HTTPERROR_400 return http_issue_error( sock, ws, CODE_HTTPERROR_400 ) 108#define HTTPERROR_400 return http_issue_error(sock, ws, CODE_HTTPERROR_400)
91#define HTTPERROR_400_PARAM return http_issue_error( sock, ws, CODE_HTTPERROR_400_PARAM ) 109#define HTTPERROR_400_PARAM return http_issue_error(sock, ws, CODE_HTTPERROR_400_PARAM)
92#define HTTPERROR_400_COMPACT return http_issue_error( sock, ws, CODE_HTTPERROR_400_COMPACT ) 110#define HTTPERROR_400_COMPACT return http_issue_error(sock, ws, CODE_HTTPERROR_400_COMPACT)
93#define HTTPERROR_400_DOUBLEHASH return http_issue_error( sock, ws, CODE_HTTPERROR_400_PARAM ) 111#define HTTPERROR_400_DOUBLEHASH return http_issue_error(sock, ws, CODE_HTTPERROR_400_PARAM)
94#define HTTPERROR_402_NOTMODEST return http_issue_error( sock, ws, CODE_HTTPERROR_402_NOTMODEST ) 112#define HTTPERROR_402_NOTMODEST return http_issue_error(sock, ws, CODE_HTTPERROR_402_NOTMODEST)
95#define HTTPERROR_403_IP return http_issue_error( sock, ws, CODE_HTTPERROR_403_IP ) 113#define HTTPERROR_403_IP return http_issue_error(sock, ws, CODE_HTTPERROR_403_IP)
96#define HTTPERROR_404 return http_issue_error( sock, ws, CODE_HTTPERROR_404 ) 114#define HTTPERROR_404 return http_issue_error(sock, ws, CODE_HTTPERROR_404)
97#define HTTPERROR_500 return http_issue_error( sock, ws, CODE_HTTPERROR_500 ) 115#define HTTPERROR_500 return http_issue_error(sock, ws, CODE_HTTPERROR_500)
98ssize_t http_issue_error( const int64 sock, struct ot_workstruct *ws, int code ) { 116ssize_t http_issue_error(const int64 sock, struct ot_workstruct *ws, int code) {
99 char *error_code[] = { "302 Found", "400 Invalid Request", "400 Invalid Request", "400 Invalid Request", "402 Payment Required", 117 char *error_code[] = {"302 Found", "400 Invalid Request", "400 Invalid Request", "400 Invalid Request", "402 Payment Required",
100 "403 Not Modest", "403 Access Denied", "404 Not Found", "500 Internal Server Error" }; 118 "403 Not Modest", "403 Access Denied", "404 Not Found", "500 Internal Server Error"};
101 char *title = error_code[code]; 119 char *title = error_code[code];
102 120
103 ws->reply = ws->outbuf; 121 ws->reply = ws->outbuf;
104 if( code == CODE_HTTPERROR_302 ) 122 if (code == CODE_HTTPERROR_302)
105 ws->reply_size = snprintf( ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 302 Found\r\nContent-Length: 0\r\nLocation: %s\r\n\r\n", g_redirecturl ); 123 ws->reply_size = snprintf(ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 302 Found\r\nContent-Length: 0\r\nLocation: %s\r\n\r\n", g_redirecturl);
106 else 124 else
107 ws->reply_size = snprintf( ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 %s\r\nContent-Type: text/html\r\nContent-Length: %zd\r\n\r\n<title>%s</title>\n", title, strlen(title)+16-4,title+4); 125 ws->reply_size = snprintf(ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 %s\r\nContent-Type: text/html\r\nContent-Length: %zd\r\n\r\n<title>%s</title>\n", title,
126 strlen(title) + 16 - 4, title + 4);
108 127
109#ifdef _DEBUG_HTTPERROR 128#ifdef _DEBUG_HTTPERROR
110 fprintf( stderr, "DEBUG: invalid request was: %s\n", ws->debugbuf ); 129 fprintf(stderr, "DEBUG: invalid request was: %s\n", ws->debugbuf);
111#endif 130#endif
112 stats_issue_event( EVENT_FAILED, FLAG_TCP, code ); 131 stats_issue_event(EVENT_FAILED, FLAG_TCP, code);
113 http_senddata( sock, ws ); 132 http_senddata(sock, ws);
114 return ws->reply_size = -2; 133 return ws->reply_size = -2;
115} 134}
116 135
117ssize_t http_sendiovecdata( const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector ) { 136ssize_t http_sendiovecdata(const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial) {
118 struct http_data *cookie = io_getcookie( sock ); 137 struct http_data *cookie = io_getcookie(sock);
119 char *header; 138 io_batch *current;
120 int i; 139 char *header;
121 size_t header_size, size = iovec_length( &iovec_entries, &iovector ); 140 const char *encoding = "";
122 tai6464 t; 141 int i;
142 size_t header_size, size = iovec_length(&iovec_entries, (const struct iovec **)&iovector);
143 tai6464 t;
123 144
124 /* No cookie? Bad socket. Leave. */ 145 /* No cookie? Bad socket. Leave. */
125 if( !cookie ) { 146 if (!cookie) {
126 iovec_free( &iovec_entries, &iovector ); 147 iovec_free(&iovec_entries, &iovector);
127 HTTPERROR_500; 148 HTTPERROR_500;
128 } 149 }
129 150
130 /* If this socket collected request in a buffer, free it now */ 151 /* If this socket collected request in a buffer, free it now */
131 array_reset( &cookie->request ); 152 array_reset(&cookie->request);
132 153
133 /* If we came here, wait for the answer is over */ 154 /* If we came here, wait for the answer is over */
134 cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK; 155 if (cookie->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK) {
135 156 io_dontwantread(sock);
136 /* Our answers never are 0 vectors. Return an error. */ 157 cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK;
137 if( !iovec_entries ) {
138 HTTPERROR_500;
139 } 158 }
140 159
141 /* Prepare space for http header */ 160 if (iovec_entries) {
142 header = malloc( SUCCESS_HTTP_HEADER_LENGTH + SUCCESS_HTTP_HEADER_LENGTH_CONTENT_ENCODING ); 161
143 if( !header ) { 162 if (cookie->flag & STRUCT_HTTP_FLAG_ZSTD)
144 iovec_free( &iovec_entries, &iovector ); 163 encoding = "Content-Encoding: zstd\r\n";
145 HTTPERROR_500; 164 else if (cookie->flag & STRUCT_HTTP_FLAG_GZIP)
146 } 165 encoding = "Content-Encoding: gzip\r\n";
147 166 else if (cookie->flag & STRUCT_HTTP_FLAG_BZIP2)
148 if( cookie->flag & STRUCT_HTTP_FLAG_GZIP ) 167 encoding = "Content-Encoding: bzip2\r\n";
149 header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: gzip\r\nContent-Length: %zd\r\n\r\n", size ); 168
150 else if( cookie->flag & STRUCT_HTTP_FLAG_BZIP2 ) 169 if (!(cookie->flag & STRUCT_HTTP_FLAG_CHUNKED))
151 header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: bzip2\r\nContent-Length: %zd\r\n\r\n", size ); 170 header_size = asprintf(&header, "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n%sContent-Length: %zd\r\n\r\n", encoding, size);
152 else 171 else {
153 header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r\n", size ); 172 if (!(cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER)) {
173 header_size =
174 asprintf(&header, "HTTP/1.1 200 OK\r\nContent-Type: application/octet-stream\r\n%sTransfer-Encoding: chunked\r\n\r\n%zx\r\n", encoding, size);
175 cookie->flag |= STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER;
176 } else
177 header_size = asprintf(&header, "%zx\r\n", size);
178 }
179 if (!header) {
180 iovec_free(&iovec_entries, &iovector);
181 HTTPERROR_500;
182 }
154 183
155 iob_reset( &cookie->batch ); 184 if (!cookie->batch) {
156 iob_addbuf_free( &cookie->batch, header, header_size ); 185 cookie->batch = malloc(sizeof(io_batch));
186 if (!cookie->batch) {
187 free(header);
188 iovec_free(&iovec_entries, &iovector);
189 HTTPERROR_500;
190 }
191 OT_IOB_INIT(cookie->batch);
192 cookie->batches = 1;
193 }
194 current = cookie->batch + cookie->batches - 1;
195 iob_addbuf_free(current, header, header_size);
196
197 /* Split huge iovectors into separate io_batches */
198 for (i = 0; i < iovec_entries; ++i) {
199 /* If the current batch's limit is reached, try to reallocate a new batch to work on */
200 if (current->bytesleft > OT_BATCH_LIMIT) {
201 io_batch *new_batch = realloc(cookie->batch, (cookie->batches + 1) * sizeof(io_batch));
202 if (new_batch) {
203 cookie->batch = new_batch;
204 current = cookie->batch + cookie->batches++;
205 OT_IOB_INIT(current);
206 }
207 }
208 iob_addbuf_free(current, iovector[i].iov_base, iovector[i].iov_len);
209 }
210 free(iovector);
211 if (cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER)
212 iob_addbuf(current, "\r\n", 2);
213 }
157 214
158 /* Will move to ot_iovec.c */ 215 if ((cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER) && cookie->batch && !is_partial) {
159 for( i=0; i<iovec_entries; ++i ) 216 current = cookie->batch + cookie->batches - 1;
160 iob_addbuf_munmap( &cookie->batch, iovector[i].iov_base, iovector[i].iov_len ); 217 iob_addbuf(current, "0\r\n\r\n", 5);
161 free( iovector ); 218 cookie->flag &= ~STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER;
219 }
162 220
163 /* writeable sockets timeout after 10 minutes */ 221 /* writeable sockets timeout after 10 minutes */
164 taia_now( &t ); taia_addsec( &t, &t, OT_CLIENT_TIMEOUT_SEND ); 222 taia_now(&t);
165 io_timeout( sock, t ); 223 taia_addsec(&t, &t, OT_CLIENT_TIMEOUT_SEND);
166 io_dontwantread( sock ); 224 io_timeout(sock, t);
167 io_wantwrite( sock ); 225 io_wantwrite(sock);
168 return 0; 226 return 0;
169} 227}
170 228
171static ssize_t http_handle_stats( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) { 229static ssize_t http_handle_stats(const int64 sock, struct ot_workstruct *ws, char *read_ptr) {
172static const ot_keywords keywords_main[] = 230 static const ot_keywords keywords_main[] = {{"mode", 1}, {"format", 2}, {"info_hash", 3}, {NULL, -3}};
173 { { "mode", 1 }, {"format", 2 }, { NULL, -3 } }; 231 static const ot_keywords keywords_mode[] = {{"peer", TASK_STATS_PEERS},
174static const ot_keywords keywords_mode[] = 232 {"conn", TASK_STATS_CONNS},
175 { { "peer", TASK_STATS_PEERS }, { "conn", TASK_STATS_CONNS }, { "scrp", TASK_STATS_SCRAPE }, { "udp4", TASK_STATS_UDP }, { "tcp4", TASK_STATS_TCP }, 233 {"scrp", TASK_STATS_SCRAPE},
176 { "busy", TASK_STATS_BUSY_NETWORKS }, { "torr", TASK_STATS_TORRENTS }, { "fscr", TASK_STATS_FULLSCRAPE }, 234 {"udp4", TASK_STATS_UDP},
177 { "s24s", TASK_STATS_SLASH24S }, { "tpbs", TASK_STATS_TPB }, { "herr", TASK_STATS_HTTPERRORS }, { "completed", TASK_STATS_COMPLETED }, 235 {"tcp4", TASK_STATS_TCP},
178 { "top100", TASK_STATS_TOP100 }, { "top10", TASK_STATS_TOP10 }, { "renew", TASK_STATS_RENEW }, { "syncs", TASK_STATS_SYNCS }, { "version", TASK_STATS_VERSION }, 236 {"busy", TASK_STATS_BUSY_NETWORKS},
179 { "everything", TASK_STATS_EVERYTHING }, { "statedump", TASK_FULLSCRAPE_TRACKERSTATE }, { "fulllog", TASK_STATS_FULLLOG }, 237 {"torr", TASK_STATS_TORRENTS},
180 { "woodpeckers", TASK_STATS_WOODPECKERS}, 238 {"fscr", TASK_STATS_FULLSCRAPE},
239 {"s24s", TASK_STATS_SLASH24S},
240 {"tpbs", TASK_STATS_TPB},
241 {"herr", TASK_STATS_HTTPERRORS},
242 {"completed", TASK_STATS_COMPLETED},
243 {"top100", TASK_STATS_TOP100},
244 {"top10", TASK_STATS_TOP10},
245 {"renew", TASK_STATS_RENEW},
246 {"syncs", TASK_STATS_SYNCS},
247 {"version", TASK_STATS_VERSION},
248 {"everything", TASK_STATS_EVERYTHING},
249 {"statedump", TASK_FULLSCRAPE_TRACKERSTATE},
250 {"fulllog", TASK_STATS_FULLLOG},
251 {"woodpeckers", TASK_STATS_WOODPECKERS},
181#ifdef WANT_LOG_NUMWANT 252#ifdef WANT_LOG_NUMWANT
182 { "numwants", TASK_STATS_NUMWANTS}, 253 {"numwants", TASK_STATS_NUMWANTS},
183#endif 254#endif
184 { NULL, -3 } }; 255 {NULL, -3}};
185static const ot_keywords keywords_format[] = 256 static const ot_keywords keywords_format[] = {{"bin", TASK_FULLSCRAPE_TPB_BINARY}, {"ben", TASK_FULLSCRAPE},
186 { { "bin", TASK_FULLSCRAPE_TPB_BINARY }, { "ben", TASK_FULLSCRAPE }, { "url", TASK_FULLSCRAPE_TPB_URLENCODED }, 257 {"url", TASK_FULLSCRAPE_TPB_URLENCODED}, {"txt", TASK_FULLSCRAPE_TPB_ASCII},
187 { "txt", TASK_FULLSCRAPE_TPB_ASCII }, { "txtp", TASK_FULLSCRAPE_TPB_ASCII_PLUS }, { NULL, -3 } }; 258 {"txtp", TASK_FULLSCRAPE_TPB_ASCII_PLUS}, {NULL, -3}};
188 259
189 int mode = TASK_STATS_PEERS, scanon = 1, format = 0; 260 int mode = TASK_STATS_PEERS, scanon = 1, format = 0;
190 261
191#ifdef WANT_RESTRICT_STATS 262#ifdef WANT_RESTRICT_STATS
192 struct http_data *cookie = io_getcookie( sock ); 263 struct http_data *cookie = io_getcookie(sock);
193 264
194 if( !cookie || !accesslist_isblessed( cookie->ip, OT_PERMISSION_MAY_STAT ) ) 265 if (!cookie || !accesslist_is_blessed(cookie->ip, OT_PERMISSION_MAY_STAT))
195 HTTPERROR_403_IP; 266 HTTPERROR_403_IP;
196#endif 267#endif
197 268
198 while( scanon ) { 269 while (scanon) {
199 switch( scan_find_keywords( keywords_main, &read_ptr, SCAN_SEARCHPATH_PARAM ) ) { 270 switch (scan_find_keywords(keywords_main, &read_ptr, SCAN_SEARCHPATH_PARAM)) {
200 case -2: scanon = 0; break; /* TERMINATOR */ 271 case -2:
201 case -1: HTTPERROR_400_PARAM; /* PARSE ERROR */ 272 scanon = 0;
202 case -3: scan_urlencoded_skipvalue( &read_ptr ); break; 273 break; /* TERMINATOR */
203 case 1: /* matched "mode" */ 274 case -1:
204 if( ( mode = scan_find_keywords( keywords_mode, &read_ptr, SCAN_SEARCHPATH_VALUE ) ) <= 0 ) HTTPERROR_400_PARAM; 275 HTTPERROR_400_PARAM; /* PARSE ERROR */
276 case -3:
277 scan_urlencoded_skipvalue(&read_ptr);
205 break; 278 break;
206 case 2: /* matched "format" */ 279 case 1: /* matched "mode" */
207 if( ( format = scan_find_keywords( keywords_format, &read_ptr, SCAN_SEARCHPATH_VALUE ) ) <= 0 ) HTTPERROR_400_PARAM; 280 if ((mode = scan_find_keywords(keywords_mode, &read_ptr, SCAN_SEARCHPATH_VALUE)) <= 0)
281 HTTPERROR_400_PARAM;
282 break;
283 case 2: /* matched "format" */
284 if ((format = scan_find_keywords(keywords_format, &read_ptr, SCAN_SEARCHPATH_VALUE)) <= 0)
285 HTTPERROR_400_PARAM;
208 break; 286 break;
287 case 3:
288 HTTPERROR_400_PARAM; /* If the stats URL was mistakenly added as announce URL, return a 400 */
209 } 289 }
210 } 290 }
211 291
212#ifdef WANT_FULLSCRAPE 292#ifdef WANT_FULLSCRAPE
213 if( mode == TASK_FULLSCRAPE_TRACKERSTATE ) { 293 if (mode == TASK_FULLSCRAPE_TRACKERSTATE) {
214 format = mode; mode = TASK_STATS_TPB; 294 format = mode;
295 mode = TASK_STATS_TPB;
215 } 296 }
216 297
217 if( mode == TASK_STATS_TPB ) { 298 if (mode == TASK_STATS_TPB) {
218 struct http_data* cookie = io_getcookie( sock ); 299 struct http_data *cookie = io_getcookie(sock);
219 tai6464 t; 300 tai6464 t;
220#ifdef WANT_COMPRESSION_GZIP 301#ifdef WANT_COMPRESSION_GZIP
221 ws->request[ws->request_size] = 0; 302 ws->request[ws->request_size] = 0;
222#ifdef WANT_COMPRESSION_GZIP_ALWAYS 303#ifndef WANT_COMPRESSION_GZIP_ALWAYS
223 if( strstr( read_ptr - 1, "gzip" ) ) { 304 if (strstr(read_ptr - 1, "gzip")) {
224#endif 305#endif
225 cookie->flag |= STRUCT_HTTP_FLAG_GZIP; 306 cookie->flag |= STRUCT_HTTP_FLAG_GZIP;
226 format |= TASK_FLAG_GZIP; 307 format |= TASK_FLAG_GZIP;
227#ifdef WANT_COMPRESSION_GZIP_ALWAYS 308#ifndef WANT_COMPRESSION_GZIP_ALWAYS
228 } 309 }
229#endif 310#endif
230#endif 311#endif
231 /* Pass this task to the worker thread */ 312 /* Pass this task to the worker thread */
232 cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK; 313 cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED;
233 314
234 /* Clients waiting for us should not easily timeout */ 315 /* Clients waiting for us should not easily timeout */
235 taia_uint( &t, 0 ); io_timeout( sock, t ); 316 taia_uint(&t, 0);
236 fullscrape_deliver( sock, format ); 317 io_timeout(sock, t);
237 io_dontwantread( sock ); 318 fullscrape_deliver(sock, format);
319 io_dontwantread(sock);
238 return ws->reply_size = -2; 320 return ws->reply_size = -2;
239 } 321 }
240#endif 322#endif
241 323
242 /* default format for now */ 324 /* default format for now */
243 if( ( mode & TASK_CLASS_MASK ) == TASK_STATS ) { 325 if ((mode & TASK_CLASS_MASK) == TASK_STATS) {
244 tai6464 t; 326 tai6464 t;
245 /* Complex stats also include expensive memory debugging tools */ 327 /* Complex stats also include expensive memory debugging tools */
246 taia_uint( &t, 0 ); io_timeout( sock, t ); 328 taia_uint(&t, 0);
247 stats_deliver( sock, mode ); 329 io_timeout(sock, t);
330 stats_deliver(sock, mode);
248 return ws->reply_size = -2; 331 return ws->reply_size = -2;
249 } 332 }
250 333
251 /* Simple stats can be answerred immediately */ 334 /* Simple stats can be answerred immediately */
252 return ws->reply_size = return_stats_for_tracker( ws->reply, mode, 0 ); 335 return ws->reply_size = return_stats_for_tracker(ws->reply, mode, 0);
253} 336}
254 337
255#ifdef WANT_MODEST_FULLSCRAPES 338#ifdef WANT_MODEST_FULLSCRAPES
256static pthread_mutex_t g_modest_fullscrape_mutex = PTHREAD_MUTEX_INITIALIZER; 339static pthread_mutex_t g_modest_fullscrape_mutex = PTHREAD_MUTEX_INITIALIZER;
257static ot_vector g_modest_fullscrape_timeouts; 340static ot_vector g_modest_fullscrape_timeouts;
258typedef struct { ot_ip6 ip; ot_time last_fullscrape; } ot_scrape_log; 341typedef struct {
342 ot_ip6 ip;
343 ot_time last_fullscrape;
344} ot_scrape_log;
259#endif 345#endif
260 346
261#ifdef WANT_FULLSCRAPE 347#ifdef WANT_FULLSCRAPE
262static ssize_t http_handle_fullscrape( const int64 sock, struct ot_workstruct *ws ) { 348static ssize_t http_handle_fullscrape(const int64 sock, struct ot_workstruct *ws) {
263 struct http_data* cookie = io_getcookie( sock ); 349 struct http_data *cookie = io_getcookie(sock);
264 int format = 0; 350 int format = 0;
265 tai6464 t; 351 tai6464 t;
266 352
267#ifdef WANT_MODEST_FULLSCRAPES 353#ifdef WANT_MODEST_FULLSCRAPES
268 { 354 {
269 ot_scrape_log this_peer, *new_peer; 355 ot_scrape_log this_peer, *new_peer;
270 int exactmatch; 356 int exactmatch;
271 memcpy( this_peer.ip, cookie->ip, sizeof(ot_ip6)); 357 memcpy(this_peer.ip, cookie->ip, sizeof(ot_ip6));
272 this_peer.last_fullscrape = g_now_seconds; 358 this_peer.last_fullscrape = g_now_seconds;
273 pthread_mutex_lock(&g_modest_fullscrape_mutex); 359 pthread_mutex_lock(&g_modest_fullscrape_mutex);
274 new_peer = vector_find_or_insert( &g_modest_fullscrape_timeouts, &this_peer, sizeof(ot_scrape_log), sizeof(ot_ip6), &exactmatch ); 360 new_peer = vector_find_or_insert(&g_modest_fullscrape_timeouts, &this_peer, sizeof(ot_scrape_log), sizeof(ot_ip6), &exactmatch);
275 if( !new_peer ) { 361 if (!new_peer) {
276 pthread_mutex_unlock(&g_modest_fullscrape_mutex); 362 pthread_mutex_unlock(&g_modest_fullscrape_mutex);
277 HTTPERROR_500; 363 HTTPERROR_500;
278 } 364 }
279 if( exactmatch && ( this_peer.last_fullscrape - new_peer->last_fullscrape ) < OT_MODEST_PEER_TIMEOUT ) { 365 if (exactmatch && (this_peer.last_fullscrape - new_peer->last_fullscrape) < OT_MODEST_PEER_TIMEOUT) {
280 pthread_mutex_unlock(&g_modest_fullscrape_mutex); 366 pthread_mutex_unlock(&g_modest_fullscrape_mutex);
281 HTTPERROR_402_NOTMODEST; 367 HTTPERROR_402_NOTMODEST;
282 } 368 }
283 memcpy( new_peer, &this_peer, sizeof(ot_scrape_log)); 369 memcpy(new_peer, &this_peer, sizeof(ot_scrape_log));
284 pthread_mutex_unlock(&g_modest_fullscrape_mutex); 370 pthread_mutex_unlock(&g_modest_fullscrape_mutex);
285 } 371 }
286#endif 372#endif
287 373
374
375#if defined(WANT_COMPRESSION_GZIP) || defined(WANT_COMPRESSION_ZSTD)
376 ws->request[ws->request_size - 1] = 0;
288#ifdef WANT_COMPRESSION_GZIP 377#ifdef WANT_COMPRESSION_GZIP
289 ws->request[ws->request_size-1] = 0; 378 if (strstr(ws->request, "gzip")) {
290 if( strstr( ws->request, "gzip" ) ) {
291 cookie->flag |= STRUCT_HTTP_FLAG_GZIP; 379 cookie->flag |= STRUCT_HTTP_FLAG_GZIP;
292 format = TASK_FLAG_GZIP; 380 format |= TASK_FLAG_GZIP;
293 stats_issue_event( EVENT_FULLSCRAPE_REQUEST_GZIP, 0, (uintptr_t)cookie->ip ); 381 }
294 } else 382#endif
383#ifdef WANT_COMPRESSION_ZSTD
384 if (strstr(ws->request, "zstd")) {
385 cookie->flag |= STRUCT_HTTP_FLAG_ZSTD;
386 format |= TASK_FLAG_ZSTD;
387 }
388#endif
389
390#if defined(WANT_COMPRESSION_ZSTD) && defined(WANT_COMPRESSION_ZSTD_ALWAYS)
391 cookie->flag |= STRUCT_HTTP_FLAG_ZSTD;
392 format |= TASK_FLAG_ZSTD;
295#endif 393#endif
296 stats_issue_event( EVENT_FULLSCRAPE_REQUEST, 0, (uintptr_t)cookie->ip ); 394
395#if defined(WANT_COMPRESSION_GZIP) && defined(WANT_COMPRESSION_GZIP_ALWAYS)
396 cookie->flag |= STRUCT_HTTP_FLAG_GZIP;
397 format |= TASK_FLAG_GZIP;
398#endif
399#endif
400
401 stats_issue_event(EVENT_FULLSCRAPE_REQUEST, 0, (uintptr_t)cookie->ip);
297 402
298#ifdef _DEBUG_HTTPERROR 403#ifdef _DEBUG_HTTPERROR
299 fprintf( stderr, "%s", ws->debugbuf ); 404 fprintf(stderr, "%s", ws->debugbuf);
300#endif 405#endif
301 406
302 /* Pass this task to the worker thread */ 407 /* Pass this task to the worker thread */
303 cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK; 408 cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED;
304 /* Clients waiting for us should not easily timeout */ 409 /* Clients waiting for us should not easily timeout */
305 taia_uint( &t, 0 ); io_timeout( sock, t ); 410 taia_uint(&t, 0);
306 fullscrape_deliver( sock, TASK_FULLSCRAPE | format ); 411 io_timeout(sock, t);
307 io_dontwantread( sock ); 412 fullscrape_deliver(sock, TASK_FULLSCRAPE | format);
413 io_dontwantread(sock);
308 return ws->reply_size = -2; 414 return ws->reply_size = -2;
309} 415}
310#endif 416#endif
311 417
312static ssize_t http_handle_scrape( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) { 418static ssize_t http_handle_scrape(const int64 sock, struct ot_workstruct *ws, char *read_ptr) {
313 static const ot_keywords keywords_scrape[] = { { "info_hash", 1 }, { NULL, -3 } }; 419 static const ot_keywords keywords_scrape[] = {{"info_hash", 1}, {NULL, -3}};
314 420
315 ot_hash * multiscrape_buf = (ot_hash*)ws->request; 421 ot_hash *multiscrape_buf = (ot_hash *)ws->request;
316 int scanon = 1, numwant = 0; 422 int scanon = 1, numwant = 0;
317 423
318 /* This is to hack around stupid clients that send "scrape ?info_hash" */ 424 /* This is to hack around stupid clients that send "scrape ?info_hash" */
319 if( read_ptr[-1] != '?' ) { 425 if (read_ptr[-1] != '?') {
320 while( ( *read_ptr != '?' ) && ( *read_ptr != '\n' ) ) ++read_ptr; 426 while ((*read_ptr != '?') && (*read_ptr != '\n'))
321 if( *read_ptr == '\n' ) HTTPERROR_400_PARAM; 427 ++read_ptr;
428 if (*read_ptr == '\n')
429 HTTPERROR_400_PARAM;
322 ++read_ptr; 430 ++read_ptr;
323 } 431 }
324 432
325 while( scanon ) { 433 while (scanon) {
326 switch( scan_find_keywords( keywords_scrape, &read_ptr, SCAN_SEARCHPATH_PARAM ) ) { 434 switch (scan_find_keywords(keywords_scrape, &read_ptr, SCAN_SEARCHPATH_PARAM)) {
327 case -2: scanon = 0; break; /* TERMINATOR */ 435 case -2:
328 default: HTTPERROR_400_PARAM; /* PARSE ERROR */ 436 scanon = 0;
329 case -3: scan_urlencoded_skipvalue( &read_ptr ); break; 437 break; /* TERMINATOR */
330 case 1: /* matched "info_hash" */ 438 default:
439 HTTPERROR_400_PARAM; /* PARSE ERROR */
440 case -3:
441 scan_urlencoded_skipvalue(&read_ptr);
442 break;
443 case 1: /* matched "info_hash" */
331 /* ignore this, when we have less than 20 bytes */ 444 /* ignore this, when we have less than 20 bytes */
332 if( scan_urlencoded_query( &read_ptr, (char*)(multiscrape_buf + numwant++), SCAN_SEARCHPATH_VALUE ) != (ssize_t)sizeof(ot_hash) ) 445 if (scan_urlencoded_query(&read_ptr, (char *)(multiscrape_buf + numwant++), SCAN_SEARCHPATH_VALUE) != (ssize_t)sizeof(ot_hash))
333 HTTPERROR_400_PARAM; 446 HTTPERROR_400_PARAM;
334 break; 447 break;
335 } 448 }
336 } 449 }
337 450
338 /* No info_hash found? Inform user */ 451 /* No info_hash found? Inform user */
339 if( !numwant ) HTTPERROR_400_PARAM; 452 if (!numwant)
453 HTTPERROR_400_PARAM;
340 454
341 /* Limit number of hashes to process */ 455 /* Limit number of hashes to process */
342 if( numwant > OT_MAXMULTISCRAPE_COUNT ) 456 if (numwant > OT_MAXMULTISCRAPE_COUNT)
343 numwant = OT_MAXMULTISCRAPE_COUNT; 457 numwant = OT_MAXMULTISCRAPE_COUNT;
344 458
345 /* Enough for http header + whole scrape string */ 459 /* Enough for http header + whole scrape string */
346 ws->reply_size = return_tcp_scrape_for_torrent( multiscrape_buf, numwant, ws->reply ); 460 ws->reply_size = return_tcp_scrape_for_torrent((const ot_hash *)multiscrape_buf, numwant, ws->reply);
347 stats_issue_event( EVENT_SCRAPE, FLAG_TCP, ws->reply_size ); 461 stats_issue_event(EVENT_SCRAPE, FLAG_TCP, ws->reply_size);
348 return ws->reply_size; 462 return ws->reply_size;
349} 463}
350 464
351#ifdef WANT_LOG_NUMWANT 465#ifdef WANT_LOG_NUMWANT
352 unsigned long long numwants[201]; 466unsigned long long numwants[201];
353#endif 467#endif
354 468
355#if defined( WANT_KEEPALIVE ) || defined( WANT_IP_FROM_PROXY ) 469#if defined(WANT_KEEPALIVE) || defined(WANT_IP_FROM_PROXY)
356static char* http_header( char *data, size_t byte_count, char *header ) { 470static char *http_header(char *data, size_t byte_count, char *header) {
357 size_t i; 471 size_t i;
358 long sl = strlen( header ); 472 long sl = strlen(header);
359 for( i = 0; i + sl + 2 < byte_count; ++i ) { 473 for (i = 0; i + sl + 2 < byte_count; ++i) {
360 if( data[i] != '\n' || data[ i + sl + 1] != ':' ) continue; 474 if (data[i] != '\n' || data[i + sl + 1] != ':')
361 if( !case_equalb( data + i + 1, sl, header ) ) continue; 475 continue;
476 if (!case_equalb(data + i + 1, sl, header))
477 continue;
362 data += i + sl + 2; 478 data += i + sl + 2;
363 while( *data == ' ' || *data == '\t' ) ++data; 479 while (*data == ' ' || *data == '\t')
480 ++data;
364 return data; 481 return data;
365 } 482 }
366 return 0; 483 return 0;
367} 484}
368#endif 485#endif
369 486
370static ot_keywords keywords_announce[] = { { "port", 1 }, { "left", 2 }, { "event", 3 }, { "numwant", 4 }, { "compact", 5 }, { "compact6", 5 }, { "info_hash", 6 }, 487static ot_keywords keywords_announce[] = {{"port", 1}, {"left", 2}, {"event", 3}, {"numwant", 4}, {"compact", 5}, {"compact6", 5}, {"info_hash", 6},
371#ifdef WANT_IP_FROM_QUERY_STRING 488#ifdef WANT_IP_FROM_QUERY_STRING
372{ "ip", 7 }, 489 {"ip", 7},
373#endif 490#endif
374#ifdef WANT_FULLLOG_NETWORKS 491#ifdef WANT_FULLLOG_NETWORKS
375{ "lognet", 8 }, 492 {"lognet", 8},
376#endif 493#endif
377{ "peer_id", 9 }, 494 {"peer_id", 9}, {NULL, -3}};
378{ NULL, -3 } }; 495static ot_keywords keywords_announce_event[] = {{"completed", 1}, {"stopped", 2}, {NULL, -3}};
379static ot_keywords keywords_announce_event[] = { { "completed", 1 }, { "stopped", 2 }, { NULL, -3 } }; 496static ssize_t http_handle_announce(const int64 sock, struct ot_workstruct *ws, char *read_ptr) {
380static ssize_t http_handle_announce( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) {
381 int numwant, tmp, scanon; 497 int numwant, tmp, scanon;
382 unsigned short port = 0; 498 unsigned short port = 0;
383 char *write_ptr; 499 char *write_ptr;
384 ssize_t len; 500 ssize_t len;
385 struct http_data *cookie = io_getcookie( sock ); 501 struct http_data *cookie = io_getcookie(sock);
386 502
387 /* This is to hack around stupid clients that send "announce ?info_hash" */ 503 /* This is to hack around stupid clients that send "announce ?info_hash" */
388 if( read_ptr[-1] != '?' ) { 504 if (read_ptr[-1] != '?') {
389 while( ( *read_ptr != '?' ) && ( *read_ptr != '\n' ) ) ++read_ptr; 505 while ((*read_ptr != '?') && (*read_ptr != '\n'))
390 if( *read_ptr == '\n' ) HTTPERROR_400_PARAM; 506 ++read_ptr;
507 if (*read_ptr == '\n')
508 HTTPERROR_400_PARAM;
391 ++read_ptr; 509 ++read_ptr;
392 } 510 }
393 511
394#ifdef WANT_IP_FROM_PROXY 512#ifdef WANT_IP_FROM_PROXY
395 if( accesslist_isblessed( cookie->ip, OT_PERMISSION_MAY_PROXY ) ) { 513 if (accesslist_is_blessed(cookie->ip, OT_PERMISSION_MAY_PROXY)) {
396 ot_ip6 proxied_ip; 514 ot_ip6 proxied_ip;
397 char *fwd = http_header( ws->request, ws->header_size, "x-forwarded-for" ); 515 char *fwd = http_header(ws->request, ws->header_size, "x-forwarded-for");
398 if( fwd && scan_ip6( fwd, proxied_ip ) ) 516 if (fwd && scan_ip6(fwd, proxied_ip)) {
399 OT_SETIP( &ws->peer, proxied_ip ); 517 OT_SETIP(ws->peer, proxied_ip);
400 else 518 } else
401 OT_SETIP( &ws->peer, cookie->ip ); 519 OT_SETIP(ws->peer, cookie->ip);
402 } else 520 } else
403#endif 521#endif
404 OT_SETIP( &ws->peer, cookie->ip ); 522 OT_SETIP(ws->peer, cookie->ip);
405 523
406 ws->peer_id = NULL; 524 ws->peer_id = NULL;
407 ws->hash = NULL; 525 ws->hash = NULL;
408 526
409 OT_SETPORT( &ws->peer, &port ); 527 OT_SETPORT(ws->peer, &port);
410 OT_PEERFLAG( &ws->peer ) = 0; 528 OT_PEERFLAG(ws->peer) = 0;
411 numwant = 50; 529 numwant = 50;
412 scanon = 1; 530 scanon = 1;
413 531
414 while( scanon ) { 532 while (scanon) {
415 switch( scan_find_keywords(keywords_announce, &read_ptr, SCAN_SEARCHPATH_PARAM ) ) { 533 switch (scan_find_keywords(keywords_announce, &read_ptr, SCAN_SEARCHPATH_PARAM)) {
416 case -2: scanon = 0; break; /* TERMINATOR */ 534 case -2:
417 case -1: HTTPERROR_400_PARAM; /* PARSE ERROR */ 535 scanon = 0;
418 case -3: scan_urlencoded_skipvalue( &read_ptr ); break; 536 break; /* TERMINATOR */
537 case -1:
538 HTTPERROR_400_PARAM; /* PARSE ERROR */
539 case -3:
540 scan_urlencoded_skipvalue(&read_ptr);
541 break;
419 case 1: /* matched "port" */ 542 case 1: /* matched "port" */
420 len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ); 543 len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE);
421 if( ( len <= 0 ) || scan_fixed_int( write_ptr, len, &tmp ) || ( tmp > 0xffff ) ) HTTPERROR_400_PARAM; 544 if ((len <= 0) || scan_fixed_int(write_ptr, len, &tmp) || (tmp > 0xffff))
422 port = htons( tmp ); OT_SETPORT( &ws->peer, &port ); 545 HTTPERROR_400_PARAM;
546 port = htons(tmp);
547 OT_SETPORT(&ws->peer, &port);
423 break; 548 break;
424 case 2: /* matched "left" */ 549 case 2: /* matched "left" */
425 if( ( len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ) ) <= 0 ) HTTPERROR_400_PARAM; 550 if ((len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE)) <= 0)
426 if( scan_fixed_int( write_ptr, len, &tmp ) ) tmp = 0; 551 HTTPERROR_400_PARAM;
427 if( !tmp ) OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_SEEDING; 552 if (scan_fixed_int(write_ptr, len, &tmp))
553 tmp = 0;
554 if (!tmp)
555 OT_PEERFLAG(&ws->peer) |= PEER_FLAG_SEEDING;
428 break; 556 break;
429 case 3: /* matched "event" */ 557 case 3: /* matched "event" */
430 switch( scan_find_keywords( keywords_announce_event, &read_ptr, SCAN_SEARCHPATH_VALUE ) ) { 558 switch (scan_find_keywords(keywords_announce_event, &read_ptr, SCAN_SEARCHPATH_VALUE)) {
431 case -1: HTTPERROR_400_PARAM; 559 case -1:
432 case 1: /* matched "completed" */ 560 HTTPERROR_400_PARAM;
433 OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_COMPLETED; 561 case 1: /* matched "completed" */
434 break; 562 OT_PEERFLAG(&ws->peer) |= PEER_FLAG_COMPLETED;
435 case 2: /* matched "stopped" */ 563 break;
436 OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_STOPPED; 564 case 2: /* matched "stopped" */
437 break; 565 OT_PEERFLAG(&ws->peer) |= PEER_FLAG_STOPPED;
438 default: 566 break;
439 break; 567 default:
568 break;
440 } 569 }
441 break; 570 break;
442 case 4: /* matched "numwant" */ 571 case 4: /* matched "numwant" */
443 len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ); 572 len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE);
444 if( ( len <= 0 ) || scan_fixed_int( write_ptr, len, &numwant ) ) HTTPERROR_400_PARAM; 573 if ((len <= 0) || scan_fixed_int(write_ptr, len, &numwant))
445 if( numwant < 0 ) numwant = 50; 574 HTTPERROR_400_PARAM;
446 if( numwant > 200 ) numwant = 200; 575 if (numwant < 0)
576 numwant = 50;
577 if (numwant > 200)
578 numwant = 200;
447 break; 579 break;
448 case 5: /* matched "compact" */ 580 case 5: /* matched "compact" */
449 len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ); 581 len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE);
450 if( ( len <= 0 ) || scan_fixed_int( write_ptr, len, &tmp ) ) HTTPERROR_400_PARAM; 582 if ((len <= 0) || scan_fixed_int(write_ptr, len, &tmp))
451 if( !tmp ) HTTPERROR_400_COMPACT; 583 HTTPERROR_400_PARAM;
584 if (!tmp)
585 HTTPERROR_400_COMPACT;
452 break; 586 break;
453 case 6: /* matched "info_hash" */ 587 case 6: /* matched "info_hash" */
454 if( ws->hash ) HTTPERROR_400_DOUBLEHASH; 588 if (ws->hash)
589 HTTPERROR_400_DOUBLEHASH;
455 /* ignore this, when we have less than 20 bytes */ 590 /* ignore this, when we have less than 20 bytes */
456 if( scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ) != 20 ) HTTPERROR_400_PARAM; 591 if (scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE) != 20)
457 ws->hash = (ot_hash*)write_ptr; 592 HTTPERROR_400_PARAM;
593 ws->hash = (ot_hash *)write_ptr;
458 break; 594 break;
459#ifdef WANT_IP_FROM_QUERY_STRING 595#ifdef WANT_IP_FROM_QUERY_STRING
460 case 7: /* matched "ip" */ 596 case 7: /* matched "ip" */
461 { 597 {
462 char *tmp_buf1 = ws->reply, *tmp_buf2 = ws->reply+16; 598 char *tmp_buf1 = ws->reply, *tmp_buf2 = ws->reply + 16;
463 len = scan_urlencoded_query( &read_ptr, tmp_buf2, SCAN_SEARCHPATH_VALUE ); 599 len = scan_urlencoded_query(&read_ptr, tmp_buf2, SCAN_SEARCHPATH_VALUE);
464 tmp_buf2[len] = 0; 600 tmp_buf2[len] = 0;
465 if( ( len <= 0 ) || !scan_ip6( tmp_buf2, tmp_buf1 ) ) HTTPERROR_400_PARAM; 601 if ((len <= 0) || !scan_ip6(tmp_buf2, tmp_buf1))
466 OT_SETIP( &ws->peer, tmp_buf1 ); 602 HTTPERROR_400_PARAM;
467 } 603 OT_SETIP(&ws->peer, tmp_buf1);
468 break; 604 } break;
469#endif 605#endif
470#ifdef WANT_FULLLOG_NETWORKS 606#ifdef WANT_FULLLOG_NETWORKS
471 case 8: /* matched "lognet" */ 607 case 8: /* matched "lognet" */
472 { 608 {
473 //if( accesslist_isblessed( cookie->ip, OT_PERMISSION_MAY_STAT ) ) { 609 // if( accesslist_is_blessed( cookie->ip, OT_PERMISSION_MAY_STAT ) ) {
474 char *tmp_buf = ws->reply; 610 char *tmp_buf = ws->reply;
475 ot_net net; 611 ot_net net;
476 signed short parsed, bits; 612 signed short parsed, bits;
477 613
478 len = scan_urlencoded_query( &read_ptr, tmp_buf, SCAN_SEARCHPATH_VALUE ); 614 len = scan_urlencoded_query(&read_ptr, tmp_buf, SCAN_SEARCHPATH_VALUE);
479 tmp_buf[len] = 0; 615 tmp_buf[len] = 0;
480 if( len <= 0 ) HTTPERROR_400_PARAM; 616 if (len <= 0)
481 if( *tmp_buf == '-' ) { 617 HTTPERROR_400_PARAM;
482 loglist_reset( ); 618 if (*tmp_buf == '-') {
483 return ws->reply_size = sprintf( ws->reply, "Successfully removed.\n" ); 619 loglist_reset();
484 } 620 return ws->reply_size = sprintf(ws->reply, "Successfully removed.\n");
485 parsed = scan_ip6( tmp_buf, net.address );
486 if( !parsed ) HTTPERROR_400_PARAM;
487 if( tmp_buf[parsed++] != '/' )
488 bits = 128;
489 else {
490 parsed = scan_short( tmp_buf + parsed, &bits );
491 if( !parsed ) HTTPERROR_400_PARAM;
492 if( ip6_isv4mapped( net.address ) )
493 bits += 96;
494 }
495 net.bits = bits;
496 loglist_add_network( &net );
497 return ws->reply_size = sprintf( ws->reply, "Successfully added.\n" );
498 //}
499 } 621 }
500 break; 622 parsed = scan_ip6(tmp_buf, net.address);
623 if (!parsed)
624 HTTPERROR_400_PARAM;
625 if (tmp_buf[parsed++] != '/')
626 bits = 128;
627 else {
628 parsed = scan_short(tmp_buf + parsed, &bits);
629 if (!parsed)
630 HTTPERROR_400_PARAM;
631 if (ip6_isv4mapped(net.address))
632 bits += 96;
633 }
634 net.bits = bits;
635 loglist_add_network(&net);
636 return ws->reply_size = sprintf(ws->reply, "Successfully added.\n");
637 //}
638 } break;
501#endif 639#endif
502 case 9: /* matched "peer_id" */ 640 case 9: /* matched "peer_id" */
503 /* ignore this, when we have less than 20 bytes */ 641 /* ignore this, when we have less than 20 bytes */
504 if( scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ) != 20 ) HTTPERROR_400_PARAM; 642 if (scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE) != 20)
505 ws->peer_id = write_ptr; 643 HTTPERROR_400_PARAM;
506 break; 644 ws->peer_id = write_ptr;
645 break;
507 } 646 }
508 } 647 }
509 648
@@ -516,100 +655,107 @@ static ssize_t http_handle_announce( const int64 sock, struct ot_workstruct *ws,
516 */ 655 */
517 656
518 /* Scanned whole query string */ 657 /* Scanned whole query string */
519 if( !ws->hash ) 658 if (!ws->hash)
520 return ws->reply_size = sprintf( ws->reply, "d14:failure reason80:Your client forgot to send your torrent's info_hash. Please upgrade your client.e" ); 659 return ws->reply_size = sprintf(ws->reply, "d14:failure reason80:Your client forgot to send your torrent's info_hash. Please upgrade your client.e");
521 660
522 if( OT_PEERFLAG( &ws->peer ) & PEER_FLAG_STOPPED ) 661 if (OT_PEERFLAG(&ws->peer) & PEER_FLAG_STOPPED)
523 ws->reply_size = remove_peer_from_torrent( FLAG_TCP, ws ); 662 ws->reply_size = remove_peer_from_torrent(FLAG_TCP, ws);
524 else 663 else
525 ws->reply_size = add_peer_to_torrent_and_return_peers( FLAG_TCP, ws, numwant ); 664 ws->reply_size = add_peer_to_torrent_and_return_peers(FLAG_TCP, ws, numwant);
526 665
527 stats_issue_event( EVENT_ANNOUNCE, FLAG_TCP, ws->reply_size); 666 stats_issue_event(EVENT_ANNOUNCE, FLAG_TCP, ws->reply_size);
528 return ws->reply_size; 667 return ws->reply_size;
529} 668}
530 669
531ssize_t http_handle_request( const int64 sock, struct ot_workstruct *ws ) { 670ssize_t http_handle_request(const int64 sock, struct ot_workstruct *ws) {
532 ssize_t reply_off, len; 671 ssize_t reply_off, len;
533 char *read_ptr = ws->request, *write_ptr; 672 char *read_ptr = ws->request, *write_ptr;
534 673
535#ifdef WANT_FULLLOG_NETWORKS 674#ifdef WANT_FULLLOG_NETWORKS
536 struct http_data *cookie = io_getcookie( sock ); 675 struct http_data *cookie = io_getcookie(sock);
537 if( loglist_check_address( cookie->ip ) ) { 676 if (loglist_check_address(cookie->ip)) {
538 ot_log *log = malloc( sizeof( ot_log ) ); 677 ot_log *log = malloc(sizeof(ot_log));
539 if( log ) { 678 if (log) {
540 log->size = ws->request_size; 679 log->size = ws->request_size;
541 log->data = malloc( ws->request_size ); 680 log->data = malloc(ws->request_size);
542 log->next = 0; 681 log->next = 0;
543 log->time = g_now_seconds; 682 log->time = g_now_seconds;
544 memcpy( log->ip, cookie->ip, sizeof(ot_ip6)); 683 memcpy(log->ip, cookie->ip, sizeof(ot_ip6));
545 if( log->data ) { 684 if (log->data) {
546 memcpy( log->data, ws->request, ws->request_size ); 685 memcpy(log->data, ws->request, ws->request_size);
547 if( !g_logchain_first ) 686 if (!g_logchain_first)
548 g_logchain_first = g_logchain_last = log; 687 g_logchain_first = g_logchain_last = log;
549 else { 688 else {
550 g_logchain_last->next = log; 689 g_logchain_last->next = log;
551 g_logchain_last = log; 690 g_logchain_last = log;
552 } 691 }
553 } else 692 } else
554 free( log ); 693 free(log);
555 } 694 }
556 } 695 }
557#endif 696#endif
558 697
559#ifdef _DEBUG_HTTPERROR 698#ifdef _DEBUG_HTTPERROR
560 reply_off = ws->request_size; 699 reply_off = ws->request_size;
561 if( ws->request_size >= G_DEBUGBUF_SIZE ) 700 if (ws->request_size >= G_DEBUGBUF_SIZE)
562 reply_off = G_DEBUGBUF_SIZE - 1; 701 reply_off = G_DEBUGBUF_SIZE - 1;
563 memcpy( ws->debugbuf, ws->request, reply_off ); 702 memcpy(ws->debugbuf, ws->request, reply_off);
564 ws->debugbuf[ reply_off ] = 0; 703 ws->debugbuf[reply_off] = 0;
565#endif 704#endif
566 705
567 /* Tell subroutines where to put reply data */ 706 /* Tell subroutines where to put reply data */
568 ws->reply = ws->outbuf + SUCCESS_HTTP_HEADER_LENGTH; 707 ws->reply = ws->outbuf + SUCCESS_HTTP_HEADER_LENGTH;
569 708
570 /* This one implicitely tests strlen < 5, too -- remember, it is \n terminated */ 709 /* This one implicitely tests strlen < 5, too -- remember, it is \n terminated */
571 if( memcmp( read_ptr, "GET /", 5) ) HTTPERROR_400; 710 if (memcmp(read_ptr, "GET /", 5))
711 HTTPERROR_400;
572 712
573 /* Skip leading '/' */ 713 /* Skip leading '/' */
574 for( read_ptr+=4; *read_ptr == '/'; ++read_ptr); 714 for (read_ptr += 4; *read_ptr == '/'; ++read_ptr)
715 ;
575 716
576 /* Try to parse the request. 717 /* Try to parse the request.
577 In reality we abandoned requiring the url to be correct. This now 718 In reality we abandoned requiring the url to be correct. This now
578 only decodes url encoded characters, we check for announces and 719 only decodes url encoded characters, we check for announces and
579 scrapes by looking for "a*" or "sc" */ 720 scrapes by looking for "a*" or "sc" */
580 len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_PATH ); 721 len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_PATH);
581 722
582 /* If parsing returned an error, leave with not found */ 723 /* If parsing returned an error, leave with not found */
583 if( g_redirecturl && ( len == -2 ) ) HTTPERROR_302; 724 if (g_redirecturl && (len == -2))
584 if( len <= 0 ) HTTPERROR_404; 725 HTTPERROR_302;
726 if (len <= 0)
727 HTTPERROR_404;
585 728
586 /* This is the hardcore match for announce*/ 729 /* This is the hardcore match for announce*/
587 if( ( *write_ptr == 'a' ) || ( *write_ptr == '?' ) ) 730 if ((*write_ptr == 'a') || (*write_ptr == '?'))
588 http_handle_announce( sock, ws, read_ptr ); 731 http_handle_announce(sock, ws, read_ptr);
589#ifdef WANT_FULLSCRAPE 732#ifdef WANT_FULLSCRAPE
590 else if( !memcmp( write_ptr, "scrape HTTP/", 12 ) ) 733 else if (!memcmp(write_ptr, "scrape HTTP/", 12))
591 http_handle_fullscrape( sock, ws ); 734 http_handle_fullscrape(sock, ws);
592#endif 735#endif
593 /* This is the hardcore match for scrape */ 736 /* This is the hardcore match for scrape */
594 else if( !memcmp( write_ptr, "sc", 2 ) ) 737 else if (!memcmp(write_ptr, "sc", 2))
595 http_handle_scrape( sock, ws, read_ptr ); 738 http_handle_scrape(sock, ws, read_ptr);
596 /* All the rest is matched the standard way */ 739 /* All the rest is matched the standard way */
597 else if( len == g_stats_path_len && !memcmp( write_ptr, g_stats_path, len ) ) 740 else if (len == g_stats_path_len && !memcmp(write_ptr, g_stats_path, len))
598 http_handle_stats( sock, ws, read_ptr ); 741 http_handle_stats(sock, ws, read_ptr);
599 else 742 else
600 HTTPERROR_404; 743 HTTPERROR_404;
601 744
602 /* Find out if the client wants to keep this connection alive */ 745 /* Find out if the client wants to keep this connection alive */
603 ws->keep_alive = 0; 746 ws->keep_alive = 0;
604#ifdef WANT_KEEPALIVE 747#ifdef WANT_KEEPALIVE
605 read_ptr=http_header( ws->request, ws->header_size, "connection"); 748 read_ptr = http_header(ws->request, ws->header_size, "connection");
606 if( read_ptr && ( *read_ptr == 'K' || *read_ptr == 'k' ) ) ws->keep_alive = 1; 749 if (read_ptr && (*read_ptr == 'K' || *read_ptr == 'k'))
750 ws->keep_alive = 1;
607#endif 751#endif
608 752
609 /* If routines handled sending themselves, just return */ 753 /* If routines handled sending themselves, just return */
610 if( ws->reply_size == -2 ) return 0; 754 if (ws->reply_size == -2)
755 return 0;
611 /* If routine failed, let http error take over */ 756 /* If routine failed, let http error take over */
612 if( ws->reply_size <= 0 ) HTTPERROR_500; 757 if (ws->reply_size <= 0)
758 HTTPERROR_500;
613 759
614 /* This one is rather ugly, so I take you step by step through it. 760 /* This one is rather ugly, so I take you step by step through it.
615 761
@@ -618,18 +764,16 @@ ssize_t http_handle_request( const int64 sock, struct ot_workstruct *ws ) {
618 plus dynamic space needed to expand our Content-Length value. We reserve SUCCESS_HTTP_SIZE_OFF for its expansion and calculate 764 plus dynamic space needed to expand our Content-Length value. We reserve SUCCESS_HTTP_SIZE_OFF for its expansion and calculate
619 the space NOT needed to expand in reply_off 765 the space NOT needed to expand in reply_off
620 */ 766 */
621 reply_off = SUCCESS_HTTP_SIZE_OFF - snprintf( ws->outbuf, 0, "%zd", ws->reply_size ); 767 reply_off = SUCCESS_HTTP_SIZE_OFF - snprintf(ws->outbuf, 0, "%zd", ws->reply_size);
622 ws->reply = ws->outbuf + reply_off; 768 ws->reply = ws->outbuf + reply_off;
623 769
624 /* 2. Now we sprintf our header so that sprintf writes its terminating '\0' exactly one byte before content starts. Complete 770 /* 2. Now we sprintf our header so that sprintf writes its terminating '\0' exactly one byte before content starts. Complete
625 packet size is increased by size of header plus one byte '\n', we will copy over '\0' in next step */ 771 packet size is increased by size of header plus one byte '\n', we will copy over '\0' in next step */
626 ws->reply_size += 1 + sprintf( ws->reply, "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r", ws->reply_size ); 772 ws->reply_size += 1 + sprintf(ws->reply, "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r", ws->reply_size);
627 773
628 /* 3. Finally we join both blocks neatly */ 774 /* 3. Finally we join both blocks neatly */
629 ws->outbuf[ SUCCESS_HTTP_HEADER_LENGTH - 1 ] = '\n'; 775 ws->outbuf[SUCCESS_HTTP_HEADER_LENGTH - 1] = '\n';
630 776
631 http_senddata( sock, ws ); 777 http_senddata(sock, ws);
632 return ws->reply_size; 778 return ws->reply_size;
633} 779}
634
635const char *g_version_http_c = "$Source$: $Revision$\n";
diff --git a/ot_http.h b/ot_http.h
index b1a60e7..b5ae9ff 100644
--- a/ot_http.h
+++ b/ot_http.h
@@ -7,21 +7,25 @@
7#define OT_HTTP_H__ 7#define OT_HTTP_H__
8 8
9typedef enum { 9typedef enum {
10 STRUCT_HTTP_FLAG_WAITINGFORTASK = 1, 10 STRUCT_HTTP_FLAG_WAITINGFORTASK = 1,
11 STRUCT_HTTP_FLAG_GZIP = 2, 11 STRUCT_HTTP_FLAG_GZIP = 2,
12 STRUCT_HTTP_FLAG_BZIP2 = 4 12 STRUCT_HTTP_FLAG_BZIP2 = 4,
13 STRUCT_HTTP_FLAG_ZSTD = 8,
14 STRUCT_HTTP_FLAG_CHUNKED = 16,
15 STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER = 32
13} STRUCT_HTTP_FLAG; 16} STRUCT_HTTP_FLAG;
14 17
15struct http_data { 18struct http_data {
16 array request; 19 array request;
17 io_batch batch; 20 io_batch *batch;
21 size_t batches;
18 ot_ip6 ip; 22 ot_ip6 ip;
19 STRUCT_HTTP_FLAG flag; 23 STRUCT_HTTP_FLAG flag;
20}; 24};
21 25
22ssize_t http_handle_request( const int64 s, struct ot_workstruct *ws ); 26ssize_t http_handle_request(const int64 s, struct ot_workstruct *ws);
23ssize_t http_sendiovecdata( const int64 s, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector ); 27ssize_t http_sendiovecdata(const int64 s, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial);
24ssize_t http_issue_error( const int64 s, struct ot_workstruct *ws, int code ); 28ssize_t http_issue_error(const int64 s, struct ot_workstruct *ws, int code);
25 29
26extern char *g_stats_path; 30extern char *g_stats_path;
27extern ssize_t g_stats_path_len; 31extern ssize_t g_stats_path_len;
diff --git a/ot_iovec.c b/ot_iovec.c
index ec0bd12..8e94c52 100644
--- a/ot_iovec.c
+++ b/ot_iovec.c
@@ -4,73 +4,89 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <sys/types.h>
8#include <sys/mman.h>
9#include <stdlib.h> 7#include <stdlib.h>
10#include <unistd.h> 8#include <sys/types.h>
11#include <sys/uio.h> 9#include <sys/uio.h>
10#include <unistd.h>
12 11
13/* Libowfat */ 12/* Libowfat */
14 13
15/* Opentracker */ 14/* Opentracker */
16#include "ot_iovec.h" 15#include "ot_iovec.h"
17 16
18void *iovec_increase( int *iovec_entries, struct iovec **iovector, size_t new_alloc ) { 17void *iovec_increase(int *iovec_entries, struct iovec **iovector, size_t new_alloc) {
19 void *new_ptr = realloc( *iovector, (1 + *iovec_entries ) * sizeof( struct iovec ) ); 18 void *new_data;
20 if( !new_ptr ) 19 int new_entries = 1 + *iovec_entries;
20 struct iovec *new_vec = realloc(*iovector, new_entries * sizeof(struct iovec));
21
22 if (!new_vec)
21 return NULL; 23 return NULL;
22 *iovector = new_ptr; 24
23 new_ptr = mmap( NULL, new_alloc, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0 ); 25 /* Only allocate after we have a place to store the pointer */
24 if( !new_ptr ) 26 new_data = malloc(new_alloc);
27 if (!new_data)
25 return NULL; 28 return NULL;
26 ((*iovector)[*iovec_entries]).iov_base = new_ptr; 29
27 ((*iovector)[*iovec_entries]).iov_len = new_alloc; 30 new_vec[new_entries - 1].iov_base = new_data;
31 new_vec[new_entries - 1].iov_len = new_alloc;
32
33 *iovector = new_vec;
28 ++*iovec_entries; 34 ++*iovec_entries;
29 return new_ptr; 35 return new_data;
36}
37
38void *iovec_append(int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector) {
39 int new_entries = *iovec_entries + 1;
40 struct iovec *new_vec = realloc(*iovector, new_entries * sizeof(struct iovec));
41 if (!new_vec)
42 return NULL;
43
44 /* Take over data from appended iovec */
45 new_vec[*iovec_entries].iov_base = append_iovector->iov_base;
46 new_vec[*iovec_entries].iov_len = append_iovector->iov_len;
47
48 append_iovector->iov_base = NULL;
49 append_iovector->iov_len = 0;
50
51 *iovector = new_vec;
52 *iovec_entries = new_entries;
53
54 return new_vec;
30} 55}
31 56
32void iovec_free( int *iovec_entries, struct iovec **iovector ) { 57void iovec_free(int *iovec_entries, struct iovec **iovector) {
33 int i; 58 int i;
34 for( i=0; i<*iovec_entries; ++i ) 59 for (i = 0; i < *iovec_entries; ++i)
35 munmap( ((*iovector)[i]).iov_base, ((*iovector)[i]).iov_len ); 60 free(((*iovector)[i]).iov_base);
61 *iovector = NULL;
36 *iovec_entries = 0; 62 *iovec_entries = 0;
37} 63}
38 64
39void iovec_fixlast( int *iovec_entries, struct iovec **iovector, void *last_ptr ) { 65void iovec_fixlast(int *iovec_entries, struct iovec **iovector, void *last_ptr) {
40 int page_size = getpagesize(); 66 if (*iovec_entries) {
41 size_t old_alloc, new_alloc, old_pages, new_pages; 67 char *base = (char *)((*iovector)[*iovec_entries - 1]).iov_base;
42 char * base = (char*)((*iovector)[ *iovec_entries - 1 ]).iov_base; 68 size_t new_alloc = ((char *)last_ptr) - base;
43 69
44 if( !*iovec_entries ) return; 70 ((*iovector)[*iovec_entries - 1]).iov_base = realloc(base, new_alloc);
45 71 ((*iovector)[*iovec_entries - 1]).iov_len = new_alloc;
46 old_alloc = ((*iovector)[ *iovec_entries - 1 ]).iov_len; 72 }
47 new_alloc = ((char*)last_ptr) - base;
48 old_pages = 1 + old_alloc / page_size;
49 new_pages = 1 + new_alloc / page_size;
50
51 if( old_pages != new_pages )
52 munmap( base + new_pages * page_size, old_alloc - new_pages * page_size );
53 ((*iovector)[*iovec_entries - 1 ]).iov_len = new_alloc;
54} 73}
55 74
56void *iovec_fix_increase_or_free( int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc ) { 75void *iovec_fix_increase_or_free(int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc) {
57 void *new_ptr; 76 void *new_data;
58 77
59 iovec_fixlast( iovec_entries, iovector, last_ptr ); 78 iovec_fixlast(iovec_entries, iovector, last_ptr);
60 79
61 if( !( new_ptr = iovec_increase( iovec_entries, iovector, new_alloc ) ) ) 80 if (!(new_data = iovec_increase(iovec_entries, iovector, new_alloc)))
62 iovec_free( iovec_entries, iovector ); 81 iovec_free(iovec_entries, iovector);
63 82
64 return new_ptr; 83 return new_data;
65} 84}
66 85
67 86size_t iovec_length(const int *iovec_entries, const struct iovec **iovector) {
68size_t iovec_length( int *iovec_entries, struct iovec **iovector ) {
69 size_t length = 0; 87 size_t length = 0;
70 int i; 88 int i;
71 for( i=0; i<*iovec_entries; ++i ) 89 for (i = 0; i < *iovec_entries; ++i)
72 length += ((*iovector)[i]).iov_len; 90 length += ((*iovector)[i]).iov_len;
73 return length; 91 return length;
74} 92}
75
76const char *g_version_iovec_c = "$Source$: $Revision$\n";
diff --git a/ot_iovec.h b/ot_iovec.h
index 5dbe706..4317ab7 100644
--- a/ot_iovec.h
+++ b/ot_iovec.h
@@ -8,12 +8,13 @@
8 8
9#include <sys/uio.h> 9#include <sys/uio.h>
10 10
11void *iovec_increase( int *iovec_entries, struct iovec **iovector, size_t new_alloc ); 11void *iovec_increase(int *iovec_entries, struct iovec **iovector, size_t new_alloc);
12void iovec_fixlast( int *iovec_entries, struct iovec **iovector, void *last_ptr ); 12void *iovec_append(int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector);
13void iovec_free( int *iovec_entries, struct iovec **iovector ); 13void iovec_fixlast(int *iovec_entries, struct iovec **iovector, void *last_ptr);
14void iovec_free(int *iovec_entries, struct iovec **iovector);
14 15
15size_t iovec_length( int *iovec_entries, struct iovec **iovector ); 16size_t iovec_length(const int *iovec_entries, const struct iovec **iovector);
16 17
17void *iovec_fix_increase_or_free( int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc ); 18void *iovec_fix_increase_or_free(int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc);
18 19
19#endif 20#endif
diff --git a/ot_livesync.c b/ot_livesync.c
index cded0f7..269b8d8 100644
--- a/ot_livesync.c
+++ b/ot_livesync.c
@@ -4,204 +4,228 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <pthread.h>
8#include <stdlib.h>
9#include <string.h>
7#include <sys/types.h> 10#include <sys/types.h>
8#include <sys/uio.h> 11#include <sys/uio.h>
9#include <string.h>
10#include <pthread.h>
11#include <unistd.h> 12#include <unistd.h>
12#include <stdlib.h>
13 13
14/* Libowfat */ 14/* Libowfat */
15#include "socket.h"
16#include "ndelay.h"
17#include "byte.h" 15#include "byte.h"
18#include "ip6.h" 16#include "ip6.h"
17#include "ndelay.h"
18#include "socket.h"
19 19
20/* Opentracker */ 20/* Opentracker */
21#include "trackerlogic.h"
22#include "ot_livesync.h"
23#include "ot_accesslist.h" 21#include "ot_accesslist.h"
24#include "ot_stats.h" 22#include "ot_livesync.h"
25#include "ot_mutex.h" 23#include "ot_mutex.h"
24#include "ot_stats.h"
25#include "trackerlogic.h"
26 26
27#ifdef WANT_SYNC_LIVE 27#ifdef WANT_SYNC_LIVE
28 28
29char groupip_1[4] = { 224,0,23,5 }; 29char groupip_1[4] = {224, 0, 23, 5};
30 30
31#define LIVESYNC_INCOMING_BUFFSIZE (256*256) 31#define LIVESYNC_INCOMING_BUFFSIZE (256 * 256)
32 32
33#define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480 33#define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480
34#define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer)+sizeof(ot_hash)) 34#define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer) + sizeof(ot_hash))
35 35
36#define LIVESYNC_MAXDELAY 15 /* seconds */ 36#define LIVESYNC_MAXDELAY 15 /* seconds */
37 37
38enum { OT_SYNC_PEER }; 38enum { OT_SYNC_PEER4, OT_SYNC_PEER6 };
39 39
40/* Forward declaration */ 40/* Forward declaration */
41static void * livesync_worker( void * args ); 41static void *livesync_worker(void *args);
42 42
43/* For outgoing packets */ 43/* For outgoing packets */
44static int64 g_socket_in = -1; 44static int64 g_socket_in = -1;
45 45
46/* For incoming packets */ 46/* For incoming packets */
47static int64 g_socket_out = -1; 47static int64 g_socket_out = -1;
48 48
49static pthread_mutex_t g_outbuf_mutex = PTHREAD_MUTEX_INITIALIZER; 49static pthread_mutex_t g_outbuf_mutex = PTHREAD_MUTEX_INITIALIZER;
50char g_outbuf[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; 50typedef struct {
51static size_t g_outbuf_data; 51 uint8_t data[LIVESYNC_OUTGOING_BUFFSIZE_PEERS];
52static ot_time g_next_packet_time; 52 size_t fill;
53 ot_time next_packet_time;
54} sync_buffer;
53 55
54static pthread_t thread_id; 56static sync_buffer g_v6_buf;
55void livesync_init( ) { 57static sync_buffer g_v4_buf;
56 58
57 if( g_socket_in == -1 ) 59static pthread_t thread_id;
58 exerr( "No socket address for live sync specified." ); 60void livesync_init() {
61
62 if (g_socket_in == -1)
63 exerr("No socket address for live sync specified.");
59 64
60 /* Prepare outgoing peers buffer */ 65 /* Prepare outgoing peers buffer */
61 memcpy( g_outbuf, &g_tracker_id, sizeof( g_tracker_id ) ); 66 memcpy(g_v6_buf.data, &g_tracker_id, sizeof(g_tracker_id));
62 uint32_pack_big( g_outbuf + sizeof( g_tracker_id ), OT_SYNC_PEER); 67 memcpy(g_v4_buf.data, &g_tracker_id, sizeof(g_tracker_id));
63 g_outbuf_data = sizeof( g_tracker_id ) + sizeof( uint32_t ); 68
69 uint32_pack_big((char *)g_v6_buf.data + sizeof(g_tracker_id), OT_SYNC_PEER6);
70 uint32_pack_big((char *)g_v4_buf.data + sizeof(g_tracker_id), OT_SYNC_PEER4);
71
72 g_v6_buf.fill = sizeof(g_tracker_id) + sizeof(uint32_t);
73 g_v4_buf.fill = sizeof(g_tracker_id) + sizeof(uint32_t);
64 74
65 g_next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; 75 g_v6_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
76 g_v4_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
66 77
67 pthread_create( &thread_id, NULL, livesync_worker, NULL ); 78 pthread_create(&thread_id, NULL, livesync_worker, NULL);
68} 79}
69 80
70void livesync_deinit() { 81void livesync_deinit() {
71 if( g_socket_in != -1 ) 82 if (g_socket_in != -1)
72 close( g_socket_in ); 83 close(g_socket_in);
73 if( g_socket_out != -1 ) 84 if (g_socket_out != -1)
74 close( g_socket_out ); 85 close(g_socket_out);
75 86
76 pthread_cancel( thread_id ); 87 pthread_cancel(thread_id);
77} 88}
78 89
79void livesync_bind_mcast( ot_ip6 ip, uint16_t port) { 90void livesync_bind_mcast(ot_ip6 ip, uint16_t port) {
80 char tmpip[4] = {0,0,0,0}; 91 char tmpip[4] = {0, 0, 0, 0};
81 char *v4ip; 92 char *v4ip;
82 93
83 if( !ip6_isv4mapped(ip)) 94 if (!ip6_isv4mapped(ip))
84 exerr("v6 mcast support not yet available."); 95 exerr("v6 mcast support not yet available.");
85 v4ip = ip+12; 96 v4ip = ip + 12;
86 97
87 if( g_socket_in != -1 ) 98 if (g_socket_in != -1)
88 exerr("Error: Livesync listen ip specified twice."); 99 exerr("Error: Livesync listen ip specified twice.");
89 100
90 if( ( g_socket_in = socket_udp4( )) < 0) 101 if ((g_socket_in = socket_udp4()) < 0)
91 exerr("Error: Cant create live sync incoming socket." ); 102 exerr("Error: Cant create live sync incoming socket.");
92 ndelay_off(g_socket_in); 103 ndelay_off(g_socket_in);
93 104
94 if( socket_bind4_reuse( g_socket_in, tmpip, port ) == -1 ) 105 if (socket_bind4_reuse(g_socket_in, tmpip, port) == -1)
95 exerr("Error: Cant bind live sync incoming socket." ); 106 exerr("Error: Cant bind live sync incoming socket.");
96 107
97 if( socket_mcjoin4( g_socket_in, groupip_1, v4ip ) ) 108 if (socket_mcjoin4(g_socket_in, groupip_1, v4ip))
98 exerr("Error: Cant make live sync incoming socket join mcast group."); 109 exerr("Error: Cant make live sync incoming socket join mcast group.");
99 110
100 if( ( g_socket_out = socket_udp4()) < 0) 111 if ((g_socket_out = socket_udp4()) < 0)
101 exerr("Error: Cant create live sync outgoing socket." ); 112 exerr("Error: Cant create live sync outgoing socket.");
102 if( socket_bind4_reuse( g_socket_out, v4ip, port ) == -1 ) 113 if (socket_bind4_reuse(g_socket_out, v4ip, port) == -1)
103 exerr("Error: Cant bind live sync outgoing socket." ); 114 exerr("Error: Cant bind live sync outgoing socket.");
104 115
105 socket_mcttl4(g_socket_out, 1); 116 socket_mcttl4(g_socket_out, 1);
106 socket_mcloop4(g_socket_out, 0); 117 socket_mcloop4(g_socket_out, 0);
107} 118}
108 119
109/* Caller MUST hold g_outbuf_mutex. Returns with g_outbuf_mutex unlocked */ 120/* Caller MUST hold g_outbuf_mutex. Returns with g_outbuf_mutex unlocked */
110static void livesync_issue_peersync( ) { 121static void livesync_issue_peersync(sync_buffer *buf) {
111 char mycopy[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; 122 char mycopy[LIVESYNC_OUTGOING_BUFFSIZE_PEERS];
112 size_t data = g_outbuf_data; 123 size_t fill = buf->fill;
113 124
114 memcpy( mycopy, g_outbuf, data ); 125 memcpy(mycopy, buf->data, fill);
115 g_outbuf_data = sizeof( g_tracker_id ) + sizeof( uint32_t ); 126 buf->fill = sizeof(g_tracker_id) + sizeof(uint32_t);
116 g_next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; 127 buf->next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
117 128
118 /* From now this thread has a local copy of the buffer and 129 /* From now this thread has a local copy of the buffer and
119 has modified the protected element */ 130 has modified the protected element */
120 pthread_mutex_unlock(&g_outbuf_mutex); 131 pthread_mutex_unlock(&g_outbuf_mutex);
121 132
122 socket_send4(g_socket_out, mycopy, data, groupip_1, LIVESYNC_PORT); 133 socket_send4(g_socket_out, mycopy, fill, groupip_1, LIVESYNC_PORT);
123} 134}
124 135
125static void livesync_handle_peersync( struct ot_workstruct *ws ) { 136static void livesync_handle_peersync(struct ot_workstruct *ws, size_t peer_size) {
126 int off = sizeof( g_tracker_id ) + sizeof( uint32_t ); 137 size_t off = sizeof(g_tracker_id) + sizeof(uint32_t);
127 138
128 /* Now basic sanity checks have been done on the live sync packet 139 /* Now basic sanity checks have been done on the live sync packet
129 We might add more testing and logging. */ 140 We might add more testing and logging. */
130 while( off + (ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ) <= ws->request_size ) { 141 while ((ssize_t)(off + sizeof(ot_hash) + peer_size) <= ws->request_size) {
131 memcpy( &ws->peer, ws->request + off + sizeof(ot_hash), sizeof( ot_peer ) ); 142 memcpy(&ws->peer, ws->request + off + sizeof(ot_hash), peer_size);
132 ws->hash = (ot_hash*)(ws->request + off); 143 ws->hash = (ot_hash *)(ws->request + off);
133 144
134 if( !g_opentracker_running ) return; 145 if (!g_opentracker_running)
146 return;
135 147
136 if( OT_PEERFLAG(&ws->peer) & PEER_FLAG_STOPPED ) 148 if (OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED)
137 remove_peer_from_torrent( FLAG_MCA, ws ); 149 remove_peer_from_torrent(FLAG_MCA, ws);
138 else 150 else
139 add_peer_to_torrent_and_return_peers( FLAG_MCA, ws, /* amount = */ 0 ); 151 add_peer_to_torrent_and_return_peers(FLAG_MCA, ws, /* amount = */ 0);
140 152
141 off += sizeof( ot_hash ) + sizeof( ot_peer ); 153 off += sizeof(ot_hash) + peer_size;
142 } 154 }
143 155
144 stats_issue_event(EVENT_SYNC, 0, 156 stats_issue_event(EVENT_SYNC, 0, (ws->request_size - sizeof(g_tracker_id) - sizeof(uint32_t)) / ((ssize_t)sizeof(ot_hash) + peer_size));
145 (ws->request_size - sizeof( g_tracker_id ) - sizeof( uint32_t ) ) /
146 ((ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer )));
147} 157}
148 158
149/* Tickle the live sync module from time to time, so no events get 159/* Tickle the live sync module from time to time, so no events get
150 stuck when there's not enough traffic to fill udp packets fast 160 stuck when there's not enough traffic to fill udp packets fast
151 enough */ 161 enough */
152void livesync_ticker( ) { 162void livesync_ticker() {
153 /* livesync_issue_peersync sets g_next_packet_time */ 163 /* livesync_issue_peersync sets g_next_packet_time */
154 pthread_mutex_lock(&g_outbuf_mutex); 164 pthread_mutex_lock(&g_outbuf_mutex);
155 if( g_now_seconds > g_next_packet_time && 165 if (g_now_seconds > g_v6_buf.next_packet_time && g_v6_buf.fill > sizeof(g_tracker_id) + sizeof(uint32_t))
156 g_outbuf_data > sizeof( g_tracker_id ) + sizeof( uint32_t ) ) 166 livesync_issue_peersync(&g_v6_buf);
157 livesync_issue_peersync(); 167 else
168 pthread_mutex_unlock(&g_outbuf_mutex);
169
170 pthread_mutex_lock(&g_outbuf_mutex);
171 if (g_now_seconds > g_v4_buf.next_packet_time && g_v4_buf.fill > sizeof(g_tracker_id) + sizeof(uint32_t))
172 livesync_issue_peersync(&g_v4_buf);
158 else 173 else
159 pthread_mutex_unlock(&g_outbuf_mutex); 174 pthread_mutex_unlock(&g_outbuf_mutex);
160} 175}
161 176
162/* Inform live sync about whats going on. */ 177/* Inform live sync about whats going on. */
163void livesync_tell( struct ot_workstruct *ws ) { 178void livesync_tell(struct ot_workstruct *ws) {
179 size_t peer_size; /* initialized in next line */
180 ot_peer *peer_src = peer_from_peer6(&ws->peer, &peer_size);
181 sync_buffer *dest_buf = peer_size == OT_PEER_SIZE6 ? &g_v6_buf : &g_v4_buf;
182
164 pthread_mutex_lock(&g_outbuf_mutex); 183 pthread_mutex_lock(&g_outbuf_mutex);
165 184
166 memcpy( g_outbuf + g_outbuf_data, ws->hash, sizeof(ot_hash) ); 185 memcpy(dest_buf->data + dest_buf->fill, ws->hash, sizeof(ot_hash));
167 memcpy( g_outbuf + g_outbuf_data + sizeof(ot_hash), &ws->peer, sizeof(ot_peer) ); 186 dest_buf->fill += sizeof(ot_hash);
168 187
169 g_outbuf_data += sizeof(ot_hash) + sizeof(ot_peer); 188 memcpy(dest_buf->data + dest_buf->fill, peer_src, peer_size);
189 dest_buf->fill += peer_size;
170 190
171 if( g_outbuf_data >= LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS ) 191 if (dest_buf->fill >= LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS)
172 livesync_issue_peersync(); 192 livesync_issue_peersync(dest_buf);
173 else 193 else
174 pthread_mutex_unlock(&g_outbuf_mutex); 194 pthread_mutex_unlock(&g_outbuf_mutex);
175} 195}
176 196
177static void * livesync_worker( void * args ) { 197static void *livesync_worker(void *args) {
178 struct ot_workstruct ws; 198 struct ot_workstruct ws;
179 ot_ip6 in_ip; uint16_t in_port; 199 ot_ip6 in_ip;
200 uint16_t in_port;
180 201
181 (void)args; 202 (void)args;
182 203
183 /* Initialize our "thread local storage" */ 204 /* Initialize our "thread local storage" */
184 ws.inbuf = ws.request = malloc( LIVESYNC_INCOMING_BUFFSIZE ); 205 ws.inbuf = ws.request = malloc(LIVESYNC_INCOMING_BUFFSIZE);
185 ws.outbuf = ws.reply = 0; 206 ws.outbuf = ws.reply = 0;
186 207
187 memcpy( in_ip, V4mappedprefix, sizeof( V4mappedprefix ) ); 208 memcpy(in_ip, V4mappedprefix, sizeof(V4mappedprefix));
188 209
189 while( 1 ) { 210 while (1) {
190 ws.request_size = socket_recv4(g_socket_in, (char*)ws.inbuf, LIVESYNC_INCOMING_BUFFSIZE, 12+(char*)in_ip, &in_port); 211 ws.request_size = socket_recv4(g_socket_in, (char *)ws.inbuf, LIVESYNC_INCOMING_BUFFSIZE, 12 + (char *)in_ip, &in_port);
191 212
192 /* Expect at least tracker id and packet type */ 213 /* Expect at least tracker id and packet type */
193 if( ws.request_size <= (ssize_t)(sizeof( g_tracker_id ) + sizeof( uint32_t )) ) 214 if (ws.request_size <= (ssize_t)(sizeof(g_tracker_id) + sizeof(uint32_t)))
194 continue; 215 continue;
195 if( !accesslist_isblessed(in_ip, OT_PERMISSION_MAY_LIVESYNC)) 216 if (!accesslist_is_blessed(in_ip, OT_PERMISSION_MAY_LIVESYNC))
196 continue; 217 continue;
197 if( !memcmp( ws.inbuf, &g_tracker_id, sizeof( g_tracker_id ) ) ) { 218 if (!memcmp(ws.inbuf, &g_tracker_id, sizeof(g_tracker_id))) {
198 /* TODO: log packet coming from ourselves */ 219 /* TODO: log packet coming from ourselves */
199 continue; 220 continue;
200 } 221 }
201 222
202 switch( uint32_read_big( sizeof( g_tracker_id ) + (char *)ws.inbuf ) ) { 223 switch (uint32_read_big(sizeof(g_tracker_id) + (char *)ws.inbuf)) {
203 case OT_SYNC_PEER: 224 case OT_SYNC_PEER6:
204 livesync_handle_peersync( &ws ); 225 livesync_handle_peersync(&ws, OT_PEER_SIZE6);
226 break;
227 case OT_SYNC_PEER4:
228 livesync_handle_peersync(&ws, OT_PEER_SIZE4);
205 break; 229 break;
206 default: 230 default:
207 break; 231 break;
@@ -213,4 +237,3 @@ static void * livesync_worker( void * args ) {
213} 237}
214 238
215#endif 239#endif
216const char *g_version_livesync_c = "$Source$: $Revision$\n";
diff --git a/ot_livesync.h b/ot_livesync.h
index d7490e5..cb28774 100644
--- a/ot_livesync.h
+++ b/ot_livesync.h
@@ -28,13 +28,19 @@
28 Each tracker instance accumulates announce requests until its buffer is 28 Each tracker instance accumulates announce requests until its buffer is
29 full or a timeout is reached. Then it broadcasts its live sync packer: 29 full or a timeout is reached. Then it broadcasts its live sync packer:
30 30
31 packet type SYNC_LIVE 31 packet type SYNC_LIVE4
32 [ 0x0008 0x14 info_hash 32 [ 0x0008 0x14 info_hash
33 0x001c 0x04 peer's ipv4 address 33 0x001c 0x04 peer's ipv4 address
34 0x0020 0x02 peer's port 34 0x0020 0x02 peer's port
35 0x0024 0x02 peer flags v1 ( SEEDING = 0x80, COMPLETE = 0x40, STOPPED = 0x20 ) 35 0x0024 0x02 peer flags v1 ( SEEDING = 0x80, COMPLETE = 0x40, STOPPED = 0x20 )
36 ]* 36 ]*
37 37
38 packet type SYNC_LIVE6
39 [ 0x0008 0x14 info_hash
40 0x001c 0x10 peer's ipv6 address
41 0x002c 0x02 peer's port
42 0x002e 0x02 peer flags v1 ( SEEDING = 0x80, COMPLETE = 0x40, STOPPED = 0x20 )
43 ]*
38*/ 44*/
39 45
40#ifdef WANT_SYNC_LIVE 46#ifdef WANT_SYNC_LIVE
@@ -45,18 +51,18 @@ void livesync_init();
45void livesync_deinit(); 51void livesync_deinit();
46 52
47/* Join multicast group for listening and create sending socket */ 53/* Join multicast group for listening and create sending socket */
48void livesync_bind_mcast( char *ip, uint16_t port ); 54void livesync_bind_mcast(char *ip, uint16_t port);
49 55
50/* Inform live sync about whats going on. */ 56/* Inform live sync about whats going on. */
51void livesync_tell( struct ot_workstruct *ws ); 57void livesync_tell(struct ot_workstruct *ws);
52 58
53/* Tickle the live sync module from time to time, so no events get 59/* Tickle the live sync module from time to time, so no events get
54 stuck when there's not enough traffic to fill udp packets fast 60 stuck when there's not enough traffic to fill udp packets fast
55 enough */ 61 enough */
56void livesync_ticker( ); 62void livesync_ticker();
57 63
58/* Handle an incoming live sync packet */ 64/* Handle an incoming live sync packet */
59void handle_livesync( const int64 sock ); 65void handle_livesync(const int64 sock);
60 66
61#else 67#else
62 68
diff --git a/ot_mutex.c b/ot_mutex.c
index 772d936..3011987 100644
--- a/ot_mutex.c
+++ b/ot_mutex.c
@@ -16,100 +16,39 @@
16#include "uint32.h" 16#include "uint32.h"
17 17
18/* Opentracker */ 18/* Opentracker */
19#include "trackerlogic.h" 19#include "ot_iovec.h"
20#include "ot_mutex.h" 20#include "ot_mutex.h"
21#include "ot_stats.h" 21#include "ot_stats.h"
22#include "trackerlogic.h"
22 23
23/* #define MTX_DBG( STRING ) fprintf( stderr, STRING ) */ 24/* #define MTX_DBG( STRING ) fprintf( stderr, STRING ) */
24#define MTX_DBG( STRING ) 25#define MTX_DBG(STRING)
25 26
26/* Our global all torrents list */ 27/* Our global all torrents list */
27static ot_vector all_torrents[OT_BUCKET_COUNT]; 28static ot_vector all_torrents[OT_BUCKET_COUNT];
28static size_t g_torrent_count; 29static pthread_mutex_t bucket_mutex[OT_BUCKET_COUNT];
29 30static size_t g_torrent_count;
30/* Bucket Magic */
31static int bucket_locklist[ OT_MAX_THREADS ];
32static int bucket_locklist_count = 0;
33static pthread_mutex_t bucket_mutex;
34static pthread_cond_t bucket_being_unlocked;
35 31
36/* Self pipe from opentracker.c */ 32/* Self pipe from opentracker.c */
37extern int g_self_pipe[2]; 33extern int g_self_pipe[2];
38
39static int bucket_check( int bucket ) {
40 /* C should come with auto-i ;) */
41 int i;
42
43 /* No more space to acquire lock to bucket -- should not happen */
44 if( bucket_locklist_count == OT_MAX_THREADS ) {
45 fprintf( stderr, "More lock requests than mutexes. Consult source code.\n" );
46 return -1;
47 }
48
49 /* See, if bucket is already locked */
50 for( i=0; i<bucket_locklist_count; ++i )
51 if( bucket_locklist[ i ] == bucket ) {
52 stats_issue_event( EVENT_BUCKET_LOCKED, 0, 0 );
53 return -1;
54 }
55
56 return 0;
57}
58
59static void bucket_push( int bucket ) {
60 bucket_locklist[ bucket_locklist_count++ ] = bucket;
61}
62 34
63static void bucket_remove( int bucket ) { 35ot_vector *mutex_bucket_lock(int bucket) {
64 int i = 0; 36 pthread_mutex_lock(bucket_mutex + bucket);
65
66 while( ( i < bucket_locklist_count ) && ( bucket_locklist[ i ] != bucket ) )
67 ++i;
68
69 if( i == bucket_locklist_count ) {
70 fprintf( stderr, "Request to unlock bucket that was never locked. Consult source code.\n" );
71 return;
72 }
73
74 for( ; i < bucket_locklist_count - 1; ++i )
75 bucket_locklist[ i ] = bucket_locklist[ i + 1 ];
76
77 --bucket_locklist_count;
78}
79
80/* Can block */
81ot_vector *mutex_bucket_lock( int bucket ) {
82 pthread_mutex_lock( &bucket_mutex );
83 while( bucket_check( bucket ) )
84 pthread_cond_wait( &bucket_being_unlocked, &bucket_mutex );
85 bucket_push( bucket );
86 pthread_mutex_unlock( &bucket_mutex );
87 return all_torrents + bucket; 37 return all_torrents + bucket;
88} 38}
89 39
90ot_vector *mutex_bucket_lock_by_hash( ot_hash hash ) { 40ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash) { return mutex_bucket_lock(uint32_read_big((const char *)hash) >> OT_BUCKET_COUNT_SHIFT); }
91 return mutex_bucket_lock( uint32_read_big( (char*)hash ) >> OT_BUCKET_COUNT_SHIFT );
92}
93 41
94void mutex_bucket_unlock( int bucket, int delta_torrentcount ) { 42void mutex_bucket_unlock(int bucket, int delta_torrentcount) {
95 pthread_mutex_lock( &bucket_mutex ); 43 pthread_mutex_unlock(bucket_mutex + bucket);
96 bucket_remove( bucket );
97 g_torrent_count += delta_torrentcount; 44 g_torrent_count += delta_torrentcount;
98 pthread_cond_broadcast( &bucket_being_unlocked );
99 pthread_mutex_unlock( &bucket_mutex );
100} 45}
101 46
102void mutex_bucket_unlock_by_hash( ot_hash hash, int delta_torrentcount ) { 47void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount) {
103 mutex_bucket_unlock( uint32_read_big( (char*)hash ) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount ); 48 mutex_bucket_unlock(uint32_read_big((char *)hash) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount);
104} 49}
105 50
106size_t mutex_get_torrent_count( ) { 51size_t mutex_get_torrent_count() { return g_torrent_count; }
107 size_t torrent_count;
108 pthread_mutex_lock( &bucket_mutex );
109 torrent_count = g_torrent_count;
110 pthread_mutex_unlock( &bucket_mutex );
111 return torrent_count;
112}
113 52
114/* TaskQueue Magic */ 53/* TaskQueue Magic */
115 54
@@ -122,32 +61,17 @@ struct ot_task {
122 struct ot_task *next; 61 struct ot_task *next;
123}; 62};
124 63
125static ot_taskid next_free_taskid = 1; 64static ot_taskid next_free_taskid = 1;
126static struct ot_task *tasklist; 65static struct ot_task *tasklist;
127static pthread_mutex_t tasklist_mutex; 66static pthread_mutex_t tasklist_mutex;
128static pthread_cond_t tasklist_being_filled; 67static pthread_cond_t tasklist_being_filled;
129 68
130int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) { 69int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype) {
131 struct ot_task ** tmptask, * task; 70 struct ot_task **tmptask, *task;
132 71
133 /* Want exclusive access to tasklist */ 72 task = malloc(sizeof(struct ot_task));
134 MTX_DBG( "pushtask locks.\n" ); 73 if (!task)
135 pthread_mutex_lock( &tasklist_mutex );
136 MTX_DBG( "pushtask locked.\n" );
137
138 task = malloc(sizeof( struct ot_task));
139 if( !task ) {
140 MTX_DBG( "pushtask fail unlocks.\n" );
141 pthread_mutex_unlock( &tasklist_mutex );
142 MTX_DBG( "pushtask fail unlocked.\n" );
143 return -1; 74 return -1;
144 }
145
146 /* Skip to end of list */
147 tmptask = &tasklist;
148 while( *tmptask )
149 tmptask = &(*tmptask)->next;
150 *tmptask = task;
151 75
152 task->taskid = 0; 76 task->taskid = 0;
153 task->tasktype = tasktype; 77 task->tasktype = tasktype;
@@ -156,181 +80,193 @@ int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) {
156 task->iovec = NULL; 80 task->iovec = NULL;
157 task->next = 0; 81 task->next = 0;
158 82
83 /* Want exclusive access to tasklist */
84 pthread_mutex_lock(&tasklist_mutex);
85
86 /* Skip to end of list */
87 tmptask = &tasklist;
88 while (*tmptask)
89 tmptask = &(*tmptask)->next;
90 *tmptask = task;
91
159 /* Inform waiting workers and release lock */ 92 /* Inform waiting workers and release lock */
160 MTX_DBG( "pushtask broadcasts.\n" ); 93 pthread_cond_broadcast(&tasklist_being_filled);
161 pthread_cond_broadcast( &tasklist_being_filled ); 94 pthread_mutex_unlock(&tasklist_mutex);
162 MTX_DBG( "pushtask broadcasted, mutex unlocks.\n" );
163 pthread_mutex_unlock( &tasklist_mutex );
164 MTX_DBG( "pushtask end mutex unlocked.\n" );
165 return 0; 95 return 0;
166} 96}
167 97
168void mutex_workqueue_canceltask( int64 sock ) { 98void mutex_workqueue_canceltask(int64 sock) {
169 struct ot_task ** task; 99 struct ot_task **task;
170 100
171 /* Want exclusive access to tasklist */ 101 /* Want exclusive access to tasklist */
172 MTX_DBG( "canceltask locks.\n" ); 102 pthread_mutex_lock(&tasklist_mutex);
173 pthread_mutex_lock( &tasklist_mutex );
174 MTX_DBG( "canceltask locked.\n" );
175 103
176 task = &tasklist; 104 for (task = &tasklist; *task; task = &((*task)->next))
177 while( *task && ( (*task)->sock != sock ) ) 105 if ((*task)->sock == sock) {
178 *task = (*task)->next; 106 struct iovec *iovec = (*task)->iovec;
107 struct ot_task *ptask = *task;
108 int i;
179 109
180 if( *task && ( (*task)->sock == sock ) ) { 110 /* Free task's iovec */
181 struct iovec *iovec = (*task)->iovec; 111 for (i = 0; i < (*task)->iovec_entries; ++i)
182 struct ot_task *ptask = *task; 112 free(iovec[i].iov_base);
183 int i;
184 113
185 /* Free task's iovec */ 114 *task = (*task)->next;
186 for( i=0; i<(*task)->iovec_entries; ++i ) 115 free(ptask);
187 munmap( iovec[i].iov_base, iovec[i].iov_len ); 116 break;
188 117 }
189 *task = (*task)->next;
190 free( ptask );
191 }
192 118
193 /* Release lock */ 119 /* Release lock */
194 MTX_DBG( "canceltask unlocks.\n" ); 120 pthread_mutex_unlock(&tasklist_mutex);
195 pthread_mutex_unlock( &tasklist_mutex );
196 MTX_DBG( "canceltask unlocked.\n" );
197} 121}
198 122
199ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype ) { 123ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype) {
200 struct ot_task * task; 124 struct ot_task *task;
201 ot_taskid taskid = 0; 125 ot_taskid taskid = 0;
202 126
203 /* Want exclusive access to tasklist */ 127 /* Want exclusive access to tasklist */
204 MTX_DBG( "poptask mutex locks.\n" ); 128 pthread_mutex_lock(&tasklist_mutex);
205 pthread_mutex_lock( &tasklist_mutex );
206 MTX_DBG( "poptask mutex locked.\n" );
207 129
208 while( !taskid ) { 130 while (!taskid) {
209 /* Skip to the first unassigned task this worker wants to do */ 131 /* Skip to the first unassigned task this worker wants to do */
210 task = tasklist; 132 for (task = tasklist; task; task = task->next)
211 while( task && ( ( ( TASK_CLASS_MASK & task->tasktype ) != *tasktype ) || task->taskid ) ) 133 if (!task->taskid && (TASK_CLASS_MASK & task->tasktype) == *tasktype) {
212 task = task->next; 134 /* If we found an outstanding task, assign a taskid to it
213 135 and leave the loop */
214 /* If we found an outstanding task, assign a taskid to it 136 task->taskid = taskid = ++next_free_taskid;
215 and leave the loop */ 137 *tasktype = task->tasktype;
216 if( task ) { 138 break;
217 task->taskid = taskid = ++next_free_taskid; 139 }
218 *tasktype = task->tasktype; 140
219 } else { 141 /* Wait until the next task is being fed */
220 /* Wait until the next task is being fed */ 142 if (!taskid)
221 MTX_DBG( "poptask cond waits.\n" ); 143 pthread_cond_wait(&tasklist_being_filled, &tasklist_mutex);
222 pthread_cond_wait( &tasklist_being_filled, &tasklist_mutex );
223 MTX_DBG( "poptask cond waited.\n" );
224 }
225 } 144 }
226 145
227 /* Release lock */ 146 /* Release lock */
228 MTX_DBG( "poptask end mutex unlocks.\n" ); 147 pthread_mutex_unlock(&tasklist_mutex);
229 pthread_mutex_unlock( &tasklist_mutex );
230 MTX_DBG( "poptask end mutex unlocked.\n" );
231 148
232 return taskid; 149 return taskid;
233} 150}
234 151
235void mutex_workqueue_pushsuccess( ot_taskid taskid ) { 152void mutex_workqueue_pushsuccess(ot_taskid taskid) {
236 struct ot_task ** task; 153 struct ot_task **task;
237 154
238 /* Want exclusive access to tasklist */ 155 /* Want exclusive access to tasklist */
239 MTX_DBG( "pushsuccess locks.\n" ); 156 pthread_mutex_lock(&tasklist_mutex);
240 pthread_mutex_lock( &tasklist_mutex ); 157
241 MTX_DBG( "pushsuccess locked.\n" ); 158 for (task = &tasklist; *task; task = &((*task)->next))
242 159 if ((*task)->taskid == taskid) {
243 task = &tasklist; 160 struct ot_task *ptask = *task;
244 while( *task && ( (*task)->taskid != taskid ) ) 161 *task = (*task)->next;
245 *task = (*task)->next; 162 free(ptask);
246 163 break;
247 if( *task && ( (*task)->taskid == taskid ) ) { 164 }
248 struct ot_task *ptask = *task;
249 *task = (*task)->next;
250 free( ptask );
251 }
252 165
253 /* Release lock */ 166 /* Release lock */
254 MTX_DBG( "pushsuccess unlocks.\n" ); 167 pthread_mutex_unlock(&tasklist_mutex);
255 pthread_mutex_unlock( &tasklist_mutex );
256 MTX_DBG( "pushsuccess unlocked.\n" );
257} 168}
258 169
259int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovec ) { 170int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovec) {
260 struct ot_task * task; 171 struct ot_task *task;
261 const char byte = 'o'; 172 const char byte = 'o';
262 173
263 /* Want exclusive access to tasklist */ 174 /* Want exclusive access to tasklist */
264 MTX_DBG( "pushresult locks.\n" ); 175 pthread_mutex_lock(&tasklist_mutex);
265 pthread_mutex_lock( &tasklist_mutex ); 176
266 MTX_DBG( "pushresult locked.\n" ); 177 for (task = tasklist; task; task = task->next)
267 178 if (task->taskid == taskid) {
268 task = tasklist; 179 task->iovec_entries = iovec_entries;
269 while( task && ( task->taskid != taskid ) ) 180 task->iovec = iovec;
270 task = task->next; 181 task->tasktype = TASK_DONE;
271 182 break;
272 if( task ) { 183 }
273 task->iovec_entries = iovec_entries;
274 task->iovec = iovec;
275 task->tasktype = TASK_DONE;
276 }
277 184
278 /* Release lock */ 185 /* Release lock */
279 MTX_DBG( "pushresult unlocks.\n" ); 186 pthread_mutex_unlock(&tasklist_mutex);
280 pthread_mutex_unlock( &tasklist_mutex );
281 MTX_DBG( "pushresult unlocked.\n" );
282 187
283 io_trywrite( g_self_pipe[1], &byte, 1 ); 188 io_trywrite(g_self_pipe[1], &byte, 1);
284 189
285 /* Indicate whether the worker has to throw away results */ 190 /* Indicate whether the worker has to throw away results */
286 return task ? 0 : -1; 191 return task ? 0 : -1;
287} 192}
288 193
289int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec ) { 194int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec) {
290 struct ot_task ** task; 195 struct ot_task *task;
291 int64 sock = -1; 196 const char byte = 'o';
292 197
293 /* Want exclusive access to tasklist */ 198 /* Want exclusive access to tasklist */
294 MTX_DBG( "popresult locks.\n" ); 199 pthread_mutex_lock(&tasklist_mutex);
295 pthread_mutex_lock( &tasklist_mutex ); 200
296 MTX_DBG( "popresult locked.\n" ); 201 for (task = tasklist; task; task = task->next)
202 if (task->taskid == taskid) {
203 if (iovec) {
204 if (iovec_append(&task->iovec_entries, &task->iovec, iovec))
205 task->tasktype = TASK_DONE_PARTIAL;
206 else
207 task = NULL;
208 } else
209 task->tasktype = TASK_DONE;
210 break;
211 }
297 212
298 task = &tasklist; 213 /* Release lock */
299 while( *task && ( (*task)->tasktype != TASK_DONE ) ) 214 pthread_mutex_unlock(&tasklist_mutex);
300 task = &(*task)->next;
301 215
302 if( *task && ( (*task)->tasktype == TASK_DONE ) ) { 216 io_trywrite(g_self_pipe[1], &byte, 1);
303 struct ot_task *ptask = *task;
304 217
305 *iovec_entries = (*task)->iovec_entries; 218 /* Indicate whether the worker has to throw away results */
306 *iovec = (*task)->iovec; 219 return task ? 0 : -1;
307 sock = (*task)->sock; 220}
308 221
309 *task = (*task)->next; 222int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovec, int *is_partial) {
310 free( ptask ); 223 struct ot_task **task;
311 } 224 int64 sock = -1;
225
226 *is_partial = 0;
227
228 /* Want exclusive access to tasklist */
229 pthread_mutex_lock(&tasklist_mutex);
230
231 for (task = &tasklist; *task; task = &((*task)->next))
232 if (((*task)->tasktype & TASK_CLASS_MASK) == TASK_DONE) {
233 struct ot_task *ptask = *task;
234 *iovec_entries = ptask->iovec_entries;
235 *iovec = ptask->iovec;
236 sock = ptask->sock;
237
238 if ((*task)->tasktype == TASK_DONE) {
239 *task = ptask->next;
240 free(ptask);
241 } else {
242 ptask->iovec_entries = 0;
243 ptask->iovec = NULL;
244 *is_partial = 1;
245 /* Prevent task from showing up immediately again unless new data was added */
246 (*task)->tasktype = TASK_FULLSCRAPE;
247 }
248 break;
249 }
312 250
313 /* Release lock */ 251 /* Release lock */
314 MTX_DBG( "popresult unlocks.\n" ); 252 pthread_mutex_unlock(&tasklist_mutex);
315 pthread_mutex_unlock( &tasklist_mutex );
316 MTX_DBG( "popresult unlocked.\n" );
317 return sock; 253 return sock;
318} 254}
319 255
320void mutex_init( ) { 256void mutex_init() {
257 int i;
321 pthread_mutex_init(&tasklist_mutex, NULL); 258 pthread_mutex_init(&tasklist_mutex, NULL);
322 pthread_cond_init (&tasklist_being_filled, NULL); 259 pthread_cond_init(&tasklist_being_filled, NULL);
323 pthread_mutex_init(&bucket_mutex, NULL); 260 for (i = 0; i < OT_BUCKET_COUNT; ++i)
324 pthread_cond_init (&bucket_being_unlocked, NULL); 261 pthread_mutex_init(bucket_mutex + i, NULL);
325 byte_zero( all_torrents, sizeof( all_torrents ) ); 262 byte_zero(all_torrents, sizeof(all_torrents));
326} 263}
327 264
328void mutex_deinit( ) { 265void mutex_deinit() {
329 pthread_mutex_destroy(&bucket_mutex); 266 int i;
330 pthread_cond_destroy(&bucket_being_unlocked); 267 for (i = 0; i < OT_BUCKET_COUNT; ++i)
268 pthread_mutex_destroy(bucket_mutex + i);
331 pthread_mutex_destroy(&tasklist_mutex); 269 pthread_mutex_destroy(&tasklist_mutex);
332 pthread_cond_destroy(&tasklist_being_filled); 270 pthread_cond_destroy(&tasklist_being_filled);
333 byte_zero( all_torrents, sizeof( all_torrents ) ); 271 byte_zero(all_torrents, sizeof(all_torrents));
334} 272}
335
336const char *g_version_mutex_c = "$Source$: $Revision$\n";
diff --git a/ot_mutex.h b/ot_mutex.h
index bd07009..cdfabc9 100644
--- a/ot_mutex.h
+++ b/ot_mutex.h
@@ -7,69 +7,74 @@
7#define OT_MUTEX_H__ 7#define OT_MUTEX_H__
8 8
9#include <sys/uio.h> 9#include <sys/uio.h>
10#include "trackerlogic.h"
10 11
11void mutex_init( ); 12void mutex_init(void);
12void mutex_deinit( ); 13void mutex_deinit(void);
13 14
14ot_vector *mutex_bucket_lock( int bucket ); 15ot_vector *mutex_bucket_lock(int bucket);
15ot_vector *mutex_bucket_lock_by_hash( ot_hash hash ); 16ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash);
16 17
17void mutex_bucket_unlock( int bucket, int delta_torrentcount ); 18void mutex_bucket_unlock(int bucket, int delta_torrentcount);
18void mutex_bucket_unlock_by_hash( ot_hash hash, int delta_torrentcount ); 19void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount);
19 20
20size_t mutex_get_torrent_count(); 21size_t mutex_get_torrent_count(void);
21 22
22typedef enum { 23typedef enum {
23 TASK_STATS_CONNS = 0x0001, 24 TASK_STATS_CONNS = 0x0001,
24 TASK_STATS_TCP = 0x0002, 25 TASK_STATS_TCP = 0x0002,
25 TASK_STATS_UDP = 0x0003, 26 TASK_STATS_UDP = 0x0003,
26 TASK_STATS_SCRAPE = 0x0004, 27 TASK_STATS_SCRAPE = 0x0004,
27 TASK_STATS_FULLSCRAPE = 0x0005, 28 TASK_STATS_FULLSCRAPE = 0x0005,
28 TASK_STATS_TPB = 0x0006, 29 TASK_STATS_TPB = 0x0006,
29 TASK_STATS_HTTPERRORS = 0x0007, 30 TASK_STATS_HTTPERRORS = 0x0007,
30 TASK_STATS_VERSION = 0x0008, 31 TASK_STATS_VERSION = 0x0008,
31 TASK_STATS_BUSY_NETWORKS = 0x0009, 32 TASK_STATS_BUSY_NETWORKS = 0x0009,
32 TASK_STATS_RENEW = 0x000a, 33 TASK_STATS_RENEW = 0x000a,
33 TASK_STATS_SYNCS = 0x000b, 34 TASK_STATS_SYNCS = 0x000b,
34 TASK_STATS_COMPLETED = 0x000c, 35 TASK_STATS_COMPLETED = 0x000c,
35 TASK_STATS_NUMWANTS = 0x000d, 36 TASK_STATS_NUMWANTS = 0x000d,
36 37
37 TASK_STATS = 0x0100, /* Mask */ 38 TASK_STATS = 0x0100, /* Mask */
38 TASK_STATS_TORRENTS = 0x0101, 39 TASK_STATS_TORRENTS = 0x0101,
39 TASK_STATS_PEERS = 0x0102, 40 TASK_STATS_PEERS = 0x0102,
40 TASK_STATS_SLASH24S = 0x0103, 41 TASK_STATS_SLASH24S = 0x0103,
41 TASK_STATS_TOP10 = 0x0104, 42 TASK_STATS_TOP10 = 0x0104,
42 TASK_STATS_TOP100 = 0x0105, 43 TASK_STATS_TOP100 = 0x0105,
43 TASK_STATS_EVERYTHING = 0x0106, 44 TASK_STATS_EVERYTHING = 0x0106,
44 TASK_STATS_FULLLOG = 0x0107, 45 TASK_STATS_FULLLOG = 0x0107,
45 TASK_STATS_WOODPECKERS = 0x0108, 46 TASK_STATS_WOODPECKERS = 0x0108,
46 47
47 TASK_FULLSCRAPE = 0x0200, /* Default mode */ 48 TASK_FULLSCRAPE = 0x0200, /* Default mode */
48 TASK_FULLSCRAPE_TPB_BINARY = 0x0201, 49 TASK_FULLSCRAPE_TPB_BINARY = 0x0201,
49 TASK_FULLSCRAPE_TPB_ASCII = 0x0202, 50 TASK_FULLSCRAPE_TPB_ASCII = 0x0202,
50 TASK_FULLSCRAPE_TPB_ASCII_PLUS = 0x0203, 51 TASK_FULLSCRAPE_TPB_ASCII_PLUS = 0x0203,
51 TASK_FULLSCRAPE_TPB_URLENCODED = 0x0204, 52 TASK_FULLSCRAPE_TPB_URLENCODED = 0x0204,
52 TASK_FULLSCRAPE_TRACKERSTATE = 0x0205, 53 TASK_FULLSCRAPE_TRACKERSTATE = 0x0205,
53 54
54 TASK_DMEM = 0x0300, 55 TASK_DMEM = 0x0300,
55 56
56 TASK_DONE = 0x0f00, 57 TASK_DONE = 0x0f00,
57 58 TASK_DONE_PARTIAL = 0x0f01,
58 TASK_FLAG_GZIP = 0x1000, 59
59 TASK_FLAG_BZIP2 = 0x2000, 60 TASK_FLAG_GZIP = 0x1000,
60 61 TASK_FLAG_BZIP2 = 0x2000,
61 TASK_TASK_MASK = 0x0fff, 62 TASK_FLAG_ZSTD = 0x4000,
62 TASK_CLASS_MASK = 0x0f00, 63 TASK_FLAG_CHUNKED = 0x8000,
63 TASK_FLAGS_MASK = 0xf000 64
65 TASK_TASK_MASK = 0x0fff,
66 TASK_CLASS_MASK = 0x0f00,
67 TASK_FLAGS_MASK = 0xf000
64} ot_tasktype; 68} ot_tasktype;
65 69
66typedef unsigned long ot_taskid; 70typedef unsigned long ot_taskid;
67 71
68int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ); 72int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype);
69void mutex_workqueue_canceltask( int64 sock ); 73void mutex_workqueue_canceltask(int64 sock);
70void mutex_workqueue_pushsuccess( ot_taskid taskid ); 74void mutex_workqueue_pushsuccess(ot_taskid taskid);
71ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype ); 75ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype);
72int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovector ); 76int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovector);
73int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovector ); 77int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec);
78int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovector, int *is_partial);
74 79
75#endif 80#endif
diff --git a/ot_rijndael.c b/ot_rijndael.c
index f468e2f..3f36bde 100644
--- a/ot_rijndael.c
+++ b/ot_rijndael.c
@@ -486,5 +486,3 @@ void rijndaelEncrypt128(const uint32_t rk[44], const uint8_t pt[16], uint8_t ct[
486 rk[43]; 486 rk[43];
487 PUTU32(ct + 12, s3); 487 PUTU32(ct + 12, s3);
488} 488}
489
490const char *g_version_rijndael_c = "$Source$: $Revision$\n";
diff --git a/ot_stats.c b/ot_stats.c
index 83cd058..158884f 100644
--- a/ot_stats.c
+++ b/ot_stats.c
@@ -4,16 +4,16 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <stdlib.h>
8#include <arpa/inet.h> 7#include <arpa/inet.h>
9#include <sys/types.h> 8#include <inttypes.h>
10#include <sys/uio.h> 9#include <pthread.h>
11#include <sys/mman.h>
12#include <stdio.h> 10#include <stdio.h>
11#include <stdlib.h>
13#include <string.h> 12#include <string.h>
14#include <pthread.h> 13#include <sys/mman.h>
14#include <sys/types.h>
15#include <sys/uio.h>
15#include <unistd.h> 16#include <unistd.h>
16#include <inttypes.h>
17#ifdef WANT_SYSLOGS 17#ifdef WANT_SYSLOGS
18#include <syslog.h> 18#include <syslog.h>
19#endif 19#endif
@@ -25,61 +25,63 @@
25#include "ip6.h" 25#include "ip6.h"
26 26
27/* Opentracker */ 27/* Opentracker */
28#include "trackerlogic.h" 28#include "ot_accesslist.h"
29#include "ot_mutex.h"
30#include "ot_iovec.h" 29#include "ot_iovec.h"
30#include "ot_mutex.h"
31#include "ot_stats.h" 31#include "ot_stats.h"
32#include "ot_accesslist.h" 32#include "trackerlogic.h"
33 33
34#ifndef NO_FULLSCRAPE_LOGGING 34#ifndef NO_FULLSCRAPE_LOGGING
35#define LOG_TO_STDERR( ... ) fprintf( stderr, __VA_ARGS__ ) 35#define LOG_TO_STDERR(...) fprintf(stderr, __VA_ARGS__)
36#else 36#else
37#define LOG_TO_STDERR( ... ) 37#define LOG_TO_STDERR(...)
38#endif 38#endif
39 39
40/* Forward declaration */ 40/* Forward declaration */
41static void stats_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ); 41static void stats_make(int *iovec_entries, struct iovec **iovector, ot_tasktype mode);
42#define OT_STATS_TMPSIZE 8192 42#define OT_STATS_TMPSIZE 8192
43 43
44/* Clumsy counters... to be rethought */ 44/* Clumsy counters... to be rethought */
45static unsigned long long ot_overall_tcp_connections = 0; 45static unsigned long long ot_overall_tcp_connections;
46static unsigned long long ot_overall_udp_connections = 0; 46static unsigned long long ot_overall_udp_connections;
47static unsigned long long ot_overall_tcp_successfulannounces = 0; 47static unsigned long long ot_overall_tcp_successfulannounces;
48static unsigned long long ot_overall_udp_successfulannounces = 0; 48static unsigned long long ot_overall_udp_successfulannounces;
49static unsigned long long ot_overall_tcp_successfulscrapes = 0; 49static unsigned long long ot_overall_tcp_successfulscrapes;
50static unsigned long long ot_overall_udp_successfulscrapes = 0; 50static unsigned long long ot_overall_udp_successfulscrapes;
51static unsigned long long ot_overall_udp_connectionidmissmatches = 0; 51static unsigned long long ot_overall_udp_connectionidmissmatches;
52static unsigned long long ot_overall_tcp_connects = 0; 52static unsigned long long ot_overall_tcp_connects;
53static unsigned long long ot_overall_udp_connects = 0; 53static unsigned long long ot_overall_udp_connects;
54static unsigned long long ot_overall_completed = 0; 54static unsigned long long ot_overall_completed;
55static unsigned long long ot_full_scrape_count = 0; 55static unsigned long long ot_full_scrape_count;
56static unsigned long long ot_full_scrape_request_count = 0; 56static unsigned long long ot_full_scrape_request_count;
57static unsigned long long ot_full_scrape_size = 0; 57static unsigned long long ot_full_scrape_size;
58static unsigned long long ot_failed_request_counts[CODE_HTTPERROR_COUNT]; 58static unsigned long long ot_failed_request_counts[CODE_HTTPERROR_COUNT];
59static char * ot_failed_request_names[] = { "302 Redirect", "400 Parse Error", "400 Invalid Parameter", "400 Invalid Parameter (compact=0)", "400 Not Modest", "402 Payment Required", "403 Access Denied", "404 Not found", "500 Internal Server Error" }; 59static char *ot_failed_request_names[] = {
60 "302 Redirect", "400 Parse Error", "400 Invalid Parameter", "400 Invalid Parameter (compact=0)", "400 Not Modest",
61 "402 Payment Required", "403 Access Denied", "404 Not found", "500 Internal Server Error"};
60static unsigned long long ot_renewed[OT_PEER_TIMEOUT]; 62static unsigned long long ot_renewed[OT_PEER_TIMEOUT];
61static unsigned long long ot_overall_sync_count; 63static unsigned long long ot_overall_sync_count;
62static unsigned long long ot_overall_stall_count; 64static unsigned long long ot_overall_stall_count;
63 65
64static time_t ot_start_time; 66static time_t ot_start_time;
65 67
66#define STATS_NETWORK_NODE_BITWIDTH 4 68#define STATS_NETWORK_NODE_BITWIDTH 4
67#define STATS_NETWORK_NODE_COUNT (1<<STATS_NETWORK_NODE_BITWIDTH) 69#define STATS_NETWORK_NODE_COUNT (1 << STATS_NETWORK_NODE_BITWIDTH)
68 70
69#define __BYTE(P,D) (((uint8_t*)P)[D/8]) 71#define __BYTE(P, D) (((uint8_t *)P)[D / 8])
70#define __MSK (STATS_NETWORK_NODE_COUNT-1) 72#define __MSK (STATS_NETWORK_NODE_COUNT - 1)
71#define __SHFT(D) ((D^STATS_NETWORK_NODE_BITWIDTH)&STATS_NETWORK_NODE_BITWIDTH) 73#define __SHFT(D) ((D ^ STATS_NETWORK_NODE_BITWIDTH) & STATS_NETWORK_NODE_BITWIDTH)
72 74
73#define __LDR(P,D) ((__BYTE((P),(D))>>__SHFT((D)))&__MSK) 75#define __LDR(P, D) ((__BYTE((P), (D)) >> __SHFT((D))) & __MSK)
74#define __STR(P,D,V) __BYTE((P),(D))=(__BYTE((P),(D))&~(__MSK<<__SHFT((D))))|((V)<<__SHFT((D))) 76#define __STR(P, D, V) __BYTE((P), (D)) = (__BYTE((P), (D)) & ~(__MSK << __SHFT((D)))) | ((V) << __SHFT((D)))
75 77
76#ifdef WANT_V6 78#if 0
77#define STATS_NETWORK_NODE_MAXDEPTH (68-STATS_NETWORK_NODE_BITWIDTH) 79// XXX
78#define STATS_NETWORK_NODE_LIMIT (48-STATS_NETWORK_NODE_BITWIDTH) 80#define STATS_NETWORK_NODE_MAXDEPTH (68 - STATS_NETWORK_NODE_BITWIDTH)
79#else 81#define STATS_NETWORK_NODE_LIMIT (48 - STATS_NETWORK_NODE_BITWIDTH)
80#define STATS_NETWORK_NODE_MAXDEPTH (28-STATS_NETWORK_NODE_BITWIDTH)
81#define STATS_NETWORK_NODE_LIMIT (24-STATS_NETWORK_NODE_BITWIDTH)
82#endif 82#endif
83#define STATS_NETWORK_NODE_MAXDEPTH (28 - STATS_NETWORK_NODE_BITWIDTH)
84#define STATS_NETWORK_NODE_LIMIT (24 - STATS_NETWORK_NODE_BITWIDTH)
83 85
84typedef union stats_network_node stats_network_node; 86typedef union stats_network_node stats_network_node;
85union stats_network_node { 87union stats_network_node {
@@ -91,120 +93,125 @@ union stats_network_node {
91static stats_network_node *stats_network_counters_root; 93static stats_network_node *stats_network_counters_root;
92#endif 94#endif
93 95
94static int stat_increase_network_count( stats_network_node **pnode, int depth, uintptr_t ip ) { 96static int stat_increase_network_count(stats_network_node **pnode, int depth, uintptr_t ip) {
95 int foo = __LDR(ip,depth); 97 int foo = __LDR(ip, depth);
96 stats_network_node *node; 98 stats_network_node *node;
97 99
98 if( !*pnode ) { 100 if (!*pnode) {
99 *pnode = malloc( sizeof( stats_network_node ) ); 101 *pnode = malloc(sizeof(stats_network_node));
100 if( !*pnode ) 102 if (!*pnode)
101 return -1; 103 return -1;
102 memset( *pnode, 0, sizeof( stats_network_node ) ); 104 memset(*pnode, 0, sizeof(stats_network_node));
103 } 105 }
104 node = *pnode; 106 node = *pnode;
105 107
106 if( depth < STATS_NETWORK_NODE_MAXDEPTH ) 108 if (depth < STATS_NETWORK_NODE_MAXDEPTH)
107 return stat_increase_network_count( node->children + foo, depth+STATS_NETWORK_NODE_BITWIDTH, ip ); 109 return stat_increase_network_count(node->children + foo, depth + STATS_NETWORK_NODE_BITWIDTH, ip);
108 110
109 node->counters[ foo ]++; 111 node->counters[foo]++;
110 return 0; 112 return 0;
111} 113}
112 114
113static int stats_shift_down_network_count( stats_network_node **node, int depth, int shift ) { 115static int stats_shift_down_network_count(stats_network_node **node, int depth, int shift) {
114 int i, rest = 0; 116 int i, rest = 0;
115 117
116 if( !*node ) 118 if (!*node)
117 return 0; 119 return 0;
118 120
119 for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) 121 for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i)
120 if( depth < STATS_NETWORK_NODE_MAXDEPTH ) 122 if (depth < STATS_NETWORK_NODE_MAXDEPTH)
121 rest += stats_shift_down_network_count( (*node)->children + i, depth+STATS_NETWORK_NODE_BITWIDTH, shift ); 123 rest += stats_shift_down_network_count((*node)->children + i, depth + STATS_NETWORK_NODE_BITWIDTH, shift);
122 else 124 else
123 rest += (*node)->counters[i] >>= shift; 125 rest += (*node)->counters[i] >>= shift;
124 126
125 if( !rest ) { 127 if (!rest) {
126 free( *node ); 128 free(*node);
127 *node = NULL; 129 *node = NULL;
128 } 130 }
129 131
130 return rest; 132 return rest;
131} 133}
132 134
133static size_t stats_get_highscore_networks( stats_network_node *node, int depth, ot_ip6 node_value, size_t *scores, ot_ip6 *networks, int network_count, int limit ) { 135static size_t stats_get_highscore_networks(stats_network_node *node, int depth, ot_ip6 node_value, size_t *scores, ot_ip6 *networks, int network_count,
136 int limit) {
134 size_t score = 0; 137 size_t score = 0;
135 int i; 138 int i;
136 139
137 if( !node ) return 0; 140 if (!node)
141 return 0;
138 142
139 if( depth < limit ) { 143 if (depth < limit) {
140 for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) 144 for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i)
141 if( node->children[i] ) { 145 if (node->children[i]) {
142 __STR(node_value,depth,i); 146 __STR(node_value, depth, i);
143 score += stats_get_highscore_networks( node->children[i], depth+STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit ); 147 score += stats_get_highscore_networks(node->children[i], depth + STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit);
144 } 148 }
145 return score; 149 return score;
146 } 150 }
147 151
148 if( depth > limit && depth < STATS_NETWORK_NODE_MAXDEPTH ) { 152 if (depth > limit && depth < STATS_NETWORK_NODE_MAXDEPTH) {
149 for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) 153 for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i)
150 if( node->children[i] ) 154 if (node->children[i])
151 score += stats_get_highscore_networks( node->children[i], depth+STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit ); 155 score += stats_get_highscore_networks(node->children[i], depth + STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit);
152 return score; 156 return score;
153 } 157 }
154 158
155 if( depth > limit && depth == STATS_NETWORK_NODE_MAXDEPTH ) { 159 if (depth > limit && depth == STATS_NETWORK_NODE_MAXDEPTH) {
156 for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) 160 for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i)
157 score += node->counters[i]; 161 score += node->counters[i];
158 return score; 162 return score;
159 } 163 }
160 164
161 /* if( depth == limit ) */ 165 /* if( depth == limit ) */
162 for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) { 166 for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i) {
163 int j=1; 167 int j = 1;
164 size_t node_score; 168 size_t node_score;
165 169
166 if( depth == STATS_NETWORK_NODE_MAXDEPTH ) 170 if (depth == STATS_NETWORK_NODE_MAXDEPTH)
167 node_score = node->counters[i]; 171 node_score = node->counters[i];
168 else 172 else
169 node_score = stats_get_highscore_networks( node->children[i], depth+STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit ); 173 node_score = stats_get_highscore_networks(node->children[i], depth + STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit);
170 174
171 score += node_score; 175 score += node_score;
172 176
173 if( node_score <= scores[0] ) continue; 177 if (node_score <= scores[0])
178 continue;
174 179
175 __STR(node_value,depth,i); 180 __STR(node_value, depth, i);
176 while( j < network_count && node_score > scores[j] ) ++j; 181 while (j < network_count && node_score > scores[j])
182 ++j;
177 --j; 183 --j;
178 184
179 memcpy( scores, scores + 1, j * sizeof( *scores ) ); 185 memcpy(scores, scores + 1, j * sizeof(*scores));
180 memcpy( networks, networks + 1, j * sizeof( *networks ) ); 186 memcpy(networks, networks + 1, j * sizeof(*networks));
181 scores[ j ] = node_score; 187 scores[j] = node_score;
182 memcpy( networks + j, node_value, sizeof( *networks ) ); 188 memcpy(networks + j, node_value, sizeof(*networks));
183 } 189 }
184 190
185 return score; 191 return score;
186} 192}
187 193
188static size_t stats_return_busy_networks( char * reply, stats_network_node *tree, int amount, int limit ) { 194static size_t stats_return_busy_networks(char *reply, stats_network_node *tree, int amount, int limit) {
189 ot_ip6 networks[amount]; 195 ot_ip6 networks[amount];
190 ot_ip6 node_value; 196 ot_ip6 node_value;
191 size_t scores[amount]; 197 size_t scores[amount];
192 int i; 198 int i;
193 char * r = reply; 199 char *r = reply;
194 200
195 memset( scores, 0, sizeof( scores ) ); 201 memset(scores, 0, sizeof(scores));
196 memset( networks, 0, sizeof( networks ) ); 202 memset(networks, 0, sizeof(networks));
197 memset( node_value, 0, sizeof( node_value ) ); 203 memset(node_value, 0, sizeof(node_value));
198 204
199 stats_get_highscore_networks( tree, 0, node_value, scores, networks, amount, limit ); 205 stats_get_highscore_networks(tree, 0, node_value, scores, networks, amount, limit);
200 206
201 r += sprintf( r, "Networks, limit /%d:\n", limit+STATS_NETWORK_NODE_BITWIDTH ); 207 r += sprintf(r, "Networks, limit /%d:\n", limit + STATS_NETWORK_NODE_BITWIDTH);
202 for( i=amount-1; i>=0; --i) { 208 for (i = amount - 1; i >= 0; --i) {
203 if( scores[i] ) { 209 if (scores[i]) {
204 r += sprintf( r, "%08zd: ", scores[i] ); 210 r += sprintf(r, "%08zd: ", scores[i]);
205#ifdef WANT_V6 211 // #ifdef WANT_V6
206 r += fmt_ip6c( r, networks[i] ); 212 r += fmt_ip6c(r, networks[i]);
207#else 213#if 0
214 // XXX
208 r += fmt_ip4( r, networks[i]); 215 r += fmt_ip4( r, networks[i]);
209#endif 216#endif
210 *r++ = '\n'; 217 *r++ = '\n';
@@ -215,64 +222,66 @@ static size_t stats_return_busy_networks( char * reply, stats_network_node *tree
215 return r - reply; 222 return r - reply;
216} 223}
217 224
218static size_t stats_slash24s_txt( char *reply, size_t amount ) { 225static size_t stats_slash24s_txt(char *reply, size_t amount) {
219 stats_network_node *slash24s_network_counters_root = NULL; 226 stats_network_node *slash24s_network_counters_root = NULL;
220 char *r=reply; 227 char *r = reply;
221 int bucket; 228 int bucket;
222 size_t i; 229 size_t i, peer_size = OT_PEER_SIZE4;
223 230
224 for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 231 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
225 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 232 ot_vector *torrents_list = mutex_bucket_lock(bucket);
226 for( i=0; i<torrents_list->size; ++i ) { 233 for (i = 0; i < torrents_list->size; ++i) {
227 ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[i] ).peer_list; 234 ot_peerlist *peer_list = (((ot_torrent *)(torrents_list->data))[i]).peer_list4;
228 ot_vector *bucket_list = &peer_list->peers; 235 ot_vector *bucket_list = &peer_list->peers;
229 int num_buckets = 1; 236 int num_buckets = 1;
230 237
231 if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { 238 if (OT_PEERLIST_HASBUCKETS(peer_list)) {
232 num_buckets = bucket_list->size; 239 num_buckets = bucket_list->size;
233 bucket_list = (ot_vector *)bucket_list->data; 240 bucket_list = (ot_vector *)bucket_list->data;
234 } 241 }
235 242
236 while( num_buckets-- ) { 243 while (num_buckets--) {
237 ot_peer *peers = (ot_peer*)bucket_list->data; 244 ot_peer *peers = (ot_peer *)bucket_list->data;
238 size_t numpeers = bucket_list->size; 245 size_t numpeers = bucket_list->size;
239 while( numpeers-- ) 246 while (numpeers--) {
240 if( stat_increase_network_count( &slash24s_network_counters_root, 0, (uintptr_t)(peers++) ) ) 247 if (stat_increase_network_count(&slash24s_network_counters_root, 0, (uintptr_t)(peers)))
241 goto bailout_unlock; 248 goto bailout_unlock;
249 peers += peer_size;
250 }
242 ++bucket_list; 251 ++bucket_list;
243 } 252 }
244 } 253 }
245 mutex_bucket_unlock( bucket, 0 ); 254 mutex_bucket_unlock(bucket, 0);
246 if( !g_opentracker_running ) 255 if (!g_opentracker_running)
247 goto bailout_error; 256 goto bailout_error;
248 } 257 }
249 258
250 /* The tree is built. Now analyze */ 259 /* The tree is built. Now analyze */
251 r += stats_return_busy_networks( r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_MAXDEPTH ); 260 r += stats_return_busy_networks(r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_MAXDEPTH);
252 r += stats_return_busy_networks( r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_LIMIT ); 261 r += stats_return_busy_networks(r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_LIMIT);
253 goto success; 262 goto success;
254 263
255bailout_unlock: 264bailout_unlock:
256 mutex_bucket_unlock( bucket, 0 ); 265 mutex_bucket_unlock(bucket, 0);
257bailout_error: 266bailout_error:
258 r = reply; 267 r = reply;
259success: 268success:
260 stats_shift_down_network_count( &slash24s_network_counters_root, 0, sizeof(int)*8-1 ); 269 stats_shift_down_network_count(&slash24s_network_counters_root, 0, sizeof(int) * 8 - 1);
261 270
262 return r-reply; 271 return r - reply;
263} 272}
264 273
265#ifdef WANT_SPOT_WOODPECKER 274#ifdef WANT_SPOT_WOODPECKER
266static stats_network_node *stats_woodpeckers_tree; 275static stats_network_node *stats_woodpeckers_tree;
267static pthread_mutex_t g_woodpeckers_mutex = PTHREAD_MUTEX_INITIALIZER; 276static pthread_mutex_t g_woodpeckers_mutex = PTHREAD_MUTEX_INITIALIZER;
268 277
269static size_t stats_return_woodpeckers( char * reply, int amount ) { 278static size_t stats_return_woodpeckers(char *reply, int amount) {
270 char * r = reply; 279 char *r = reply;
271 280
272 pthread_mutex_lock( &g_woodpeckers_mutex ); 281 pthread_mutex_lock(&g_woodpeckers_mutex);
273 r += stats_return_busy_networks( r, stats_woodpeckers_tree, amount, STATS_NETWORK_NODE_MAXDEPTH ); 282 r += stats_return_busy_networks(r, stats_woodpeckers_tree, amount, STATS_NETWORK_NODE_MAXDEPTH);
274 pthread_mutex_unlock( &g_woodpeckers_mutex ); 283 pthread_mutex_unlock(&g_woodpeckers_mutex);
275 return r-reply; 284 return r - reply;
276} 285}
277#endif 286#endif
278 287
@@ -282,492 +291,481 @@ typedef struct {
282 unsigned long long seed_count; 291 unsigned long long seed_count;
283} torrent_stats; 292} torrent_stats;
284 293
285static int torrent_statter( ot_torrent *torrent, uintptr_t data ) { 294static int torrent_statter(ot_torrent *torrent, uintptr_t data) {
286 torrent_stats *stats = (torrent_stats*)data; 295 torrent_stats *stats = (torrent_stats *)data;
287 stats->torrent_count++; 296 stats->torrent_count++;
288 stats->peer_count += torrent->peer_list->peer_count; 297 stats->peer_count += torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
289 stats->seed_count += torrent->peer_list->seed_count; 298 stats->seed_count += torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
290 return 0; 299 return 0;
291} 300}
292 301
293/* Converter function from memory to human readable hex strings */ 302/* Converter function from memory to human readable hex strings */
294static char*to_hex(char*d,uint8_t*s){char*m="0123456789ABCDEF";char *t=d;char*e=d+40;while(d<e){*d++=m[*s>>4];*d++=m[*s++&15];}*d=0;return t;} 303static char *to_hex(char *d, uint8_t *s) {
304 char *m = "0123456789ABCDEF";
305 char *t = d;
306 char *e = d + 40;
307 while (d < e) {
308 *d++ = m[*s >> 4];
309 *d++ = m[*s++ & 15];
310 }
311 *d = 0;
312 return t;
313}
295 314
296typedef struct { size_t val; ot_torrent * torrent; } ot_record; 315typedef struct {
316 size_t val;
317 ot_hash hash;
318} ot_record;
297 319
298/* Fetches stats from tracker */ 320/* Fetches stats from tracker */
299size_t stats_top_txt( char * reply, int amount ) { 321size_t stats_top_txt(char *reply, int amount) {
300 size_t j; 322 size_t j;
301 ot_record top100s[100], top100c[100]; 323 ot_record top100s[100], top100c[100];
302 char *r = reply, hex_out[42]; 324 char *r = reply, hex_out[42];
303 int idx, bucket; 325 int idx, bucket;
304 326
305 if( amount > 100 ) 327 if (amount > 100)
306 amount = 100; 328 amount = 100;
307 329
308 byte_zero( top100s, sizeof( top100s ) ); 330 byte_zero(top100s, sizeof(top100s));
309 byte_zero( top100c, sizeof( top100c ) ); 331 byte_zero(top100c, sizeof(top100c));
310 332
311 for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 333 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
312 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 334 ot_vector *torrents_list = mutex_bucket_lock(bucket);
313 for( j=0; j<torrents_list->size; ++j ) { 335 for (j = 0; j < torrents_list->size; ++j) {
314 ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[j] ).peer_list; 336 ot_torrent *torrent = (ot_torrent *)(torrents_list->data) + j;
315 int idx = amount - 1; while( (idx >= 0) && ( peer_list->peer_count > top100c[idx].val ) ) --idx; 337 size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
316 if ( idx++ != amount - 1 ) { 338 size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
317 memmove( top100c + idx + 1, top100c + idx, ( amount - 1 - idx ) * sizeof( ot_record ) ); 339 idx = amount - 1;
318 top100c[idx].val = peer_list->peer_count; 340 while ((idx >= 0) && (peer_count > top100c[idx].val))
319 top100c[idx].torrent = (ot_torrent*)(torrents_list->data) + j; 341 --idx;
342 if (idx++ != amount - 1) {
343 memmove(top100c + idx + 1, top100c + idx, (amount - 1 - idx) * sizeof(ot_record));
344 memcpy(&top100c[idx].hash, &torrent->hash, sizeof(ot_hash));
345 top100c[idx].val = peer_count;
320 } 346 }
321 idx = amount - 1; while( (idx >= 0) && ( peer_list->seed_count > top100s[idx].val ) ) --idx; 347 idx = amount - 1;
322 if ( idx++ != amount - 1 ) { 348 while ((idx >= 0) && (seed_count > top100s[idx].val))
323 memmove( top100s + idx + 1, top100s + idx, ( amount - 1 - idx ) * sizeof( ot_record ) ); 349 --idx;
324 top100s[idx].val = peer_list->seed_count; 350 if (idx++ != amount - 1) {
325 top100s[idx].torrent = (ot_torrent*)(torrents_list->data) + j; 351 memmove(top100s + idx + 1, top100s + idx, (amount - 1 - idx) * sizeof(ot_record));
352 memcpy(&top100s[idx].hash, &torrent->hash, sizeof(ot_hash));
353 top100s[idx].val = seed_count;
326 } 354 }
327 } 355 }
328 mutex_bucket_unlock( bucket, 0 ); 356 mutex_bucket_unlock(bucket, 0);
329 if( !g_opentracker_running ) 357 if (!g_opentracker_running)
330 return 0; 358 return 0;
331 } 359 }
332 360
333 r += sprintf( r, "Top %d torrents by peers:\n", amount ); 361 r += sprintf(r, "Top %d torrents by peers:\n", amount);
334 for( idx=0; idx<amount; ++idx ) 362 for (idx = 0; idx < amount; ++idx)
335 if( top100c[idx].torrent ) 363 if (top100c[idx].val)
336 r += sprintf( r, "\t%zd\t%s\n", top100c[idx].val, to_hex( hex_out, top100c[idx].torrent->hash) ); 364 r += sprintf(r, "\t%zd\t%s\n", top100c[idx].val, to_hex(hex_out, top100c[idx].hash));
337 r += sprintf( r, "Top %d torrents by seeds:\n", amount ); 365 r += sprintf(r, "Top %d torrents by seeds:\n", amount);
338 for( idx=0; idx<amount; ++idx ) 366 for (idx = 0; idx < amount; ++idx)
339 if( top100s[idx].torrent ) 367 if (top100s[idx].val)
340 r += sprintf( r, "\t%zd\t%s\n", top100s[idx].val, to_hex( hex_out, top100s[idx].torrent->hash) ); 368 r += sprintf(r, "\t%zd\t%s\n", top100s[idx].val, to_hex(hex_out, top100s[idx].hash));
341 369
342 return r - reply; 370 return r - reply;
343} 371}
344 372
345static unsigned long events_per_time( unsigned long long events, time_t t ) { 373static unsigned long events_per_time(unsigned long long events, time_t t) { return events / ((unsigned int)t ? (unsigned int)t : 1); }
346 return events / ( (unsigned int)t ? (unsigned int)t : 1 );
347}
348 374
349static size_t stats_connections_mrtg( char * reply ) { 375static size_t stats_connections_mrtg(char *reply) {
350 ot_time t = time( NULL ) - ot_start_time; 376 ot_time t = time(NULL) - ot_start_time;
351 return sprintf( reply, 377 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.",
352 "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", 378 ot_overall_tcp_connections + ot_overall_udp_connections,
353 ot_overall_tcp_connections+ot_overall_udp_connections, 379 ot_overall_tcp_successfulannounces + ot_overall_udp_successfulannounces + ot_overall_udp_connects, (int)t, (int)(t / 3600),
354 ot_overall_tcp_successfulannounces+ot_overall_udp_successfulannounces+ot_overall_udp_connects, 380 events_per_time(ot_overall_tcp_connections + ot_overall_udp_connections, t),
355 (int)t, 381 events_per_time(ot_overall_tcp_successfulannounces + ot_overall_udp_successfulannounces + ot_overall_udp_connects, t));
356 (int)(t / 3600),
357 events_per_time( ot_overall_tcp_connections+ot_overall_udp_connections, t ),
358 events_per_time( ot_overall_tcp_successfulannounces+ot_overall_udp_successfulannounces+ot_overall_udp_connects, t )
359 );
360} 382}
361 383
362static size_t stats_udpconnections_mrtg( char * reply ) { 384static size_t stats_udpconnections_mrtg(char *reply) {
363 ot_time t = time( NULL ) - ot_start_time; 385 ot_time t = time(NULL) - ot_start_time;
364 return sprintf( reply, 386 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker udp4 stats, %lu conns/s :: %lu success/s.", ot_overall_udp_connections,
365 "%llu\n%llu\n%i seconds (%i hours)\nopentracker udp4 stats, %lu conns/s :: %lu success/s.", 387 ot_overall_udp_successfulannounces + ot_overall_udp_connects, (int)t, (int)(t / 3600), events_per_time(ot_overall_udp_connections, t),
366 ot_overall_udp_connections, 388 events_per_time(ot_overall_udp_successfulannounces + ot_overall_udp_connects, t));
367 ot_overall_udp_successfulannounces+ot_overall_udp_connects,
368 (int)t,
369 (int)(t / 3600),
370 events_per_time( ot_overall_udp_connections, t ),
371 events_per_time( ot_overall_udp_successfulannounces+ot_overall_udp_connects, t )
372 );
373} 389}
374 390
375static size_t stats_tcpconnections_mrtg( char * reply ) { 391static size_t stats_tcpconnections_mrtg(char *reply) {
376 time_t t = time( NULL ) - ot_start_time; 392 time_t t = time(NULL) - ot_start_time;
377 return sprintf( reply, 393 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker tcp4 stats, %lu conns/s :: %lu success/s.", ot_overall_tcp_connections,
378 "%llu\n%llu\n%i seconds (%i hours)\nopentracker tcp4 stats, %lu conns/s :: %lu success/s.", 394 ot_overall_tcp_successfulannounces, (int)t, (int)(t / 3600), events_per_time(ot_overall_tcp_connections, t),
379 ot_overall_tcp_connections, 395 events_per_time(ot_overall_tcp_successfulannounces, t));
380 ot_overall_tcp_successfulannounces,
381 (int)t,
382 (int)(t / 3600),
383 events_per_time( ot_overall_tcp_connections, t ),
384 events_per_time( ot_overall_tcp_successfulannounces, t )
385 );
386} 396}
387 397
388static size_t stats_scrape_mrtg( char * reply ) { 398static size_t stats_scrape_mrtg(char *reply) {
389 time_t t = time( NULL ) - ot_start_time; 399 time_t t = time(NULL) - ot_start_time;
390 return sprintf( reply, 400 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker scrape stats, %lu scrape/s (tcp and udp)", ot_overall_tcp_successfulscrapes,
391 "%llu\n%llu\n%i seconds (%i hours)\nopentracker scrape stats, %lu scrape/s (tcp and udp)", 401 ot_overall_udp_successfulscrapes, (int)t, (int)(t / 3600),
392 ot_overall_tcp_successfulscrapes, 402 events_per_time((ot_overall_tcp_successfulscrapes + ot_overall_udp_successfulscrapes), t));
393 ot_overall_udp_successfulscrapes,
394 (int)t,
395 (int)(t / 3600),
396 events_per_time( (ot_overall_tcp_successfulscrapes+ot_overall_udp_successfulscrapes), t )
397 );
398} 403}
399 404
400static size_t stats_fullscrapes_mrtg( char * reply ) { 405static size_t stats_fullscrapes_mrtg(char *reply) {
401 ot_time t = time( NULL ) - ot_start_time; 406 ot_time t = time(NULL) - ot_start_time;
402 return sprintf( reply, 407 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker full scrape stats, %lu conns/s :: %lu bytes/s.", ot_full_scrape_count * 1000,
403 "%llu\n%llu\n%i seconds (%i hours)\nopentracker full scrape stats, %lu conns/s :: %lu bytes/s.", 408 ot_full_scrape_size, (int)t, (int)(t / 3600), events_per_time(ot_full_scrape_count, t), events_per_time(ot_full_scrape_size, t));
404 ot_full_scrape_count * 1000,
405 ot_full_scrape_size,
406 (int)t,
407 (int)(t / 3600),
408 events_per_time( ot_full_scrape_count, t ),
409 events_per_time( ot_full_scrape_size, t )
410 );
411} 409}
412 410
413static size_t stats_peers_mrtg( char * reply ) { 411static size_t stats_peers_mrtg(char *reply) {
414 torrent_stats stats = {0,0,0}; 412 torrent_stats stats = {0, 0, 0};
415 413
416 iterate_all_torrents( torrent_statter, (uintptr_t)&stats ); 414 iterate_all_torrents(torrent_statter, (uintptr_t)&stats);
417 415
418 return sprintf( reply, "%llu\n%llu\nopentracker serving %llu torrents\nopentracker", 416 return sprintf(reply, "%llu\n%llu\nopentracker serving %llu torrents\nopentracker", stats.peer_count, stats.seed_count, stats.torrent_count);
419 stats.peer_count,
420 stats.seed_count,
421 stats.torrent_count
422 );
423} 417}
424 418
425static size_t stats_torrents_mrtg( char * reply ) 419static size_t stats_torrents_mrtg(char *reply) {
426{
427 size_t torrent_count = mutex_get_torrent_count(); 420 size_t torrent_count = mutex_get_torrent_count();
428 421
429 return sprintf( reply, "%zd\n%zd\nopentracker serving %zd torrents\nopentracker", 422 return sprintf(reply, "%zd\n%zd\nopentracker serving %zd torrents\nopentracker", torrent_count, (size_t)0, torrent_count);
430 torrent_count,
431 (size_t)0,
432 torrent_count
433 );
434} 423}
435 424
436static size_t stats_httperrors_txt ( char * reply ) { 425static size_t stats_httperrors_txt(char *reply) {
437 return sprintf( reply, "302 RED %llu\n400 ... %llu\n400 PAR %llu\n400 COM %llu\n403 IP %llu\n404 INV %llu\n500 SRV %llu\n", 426 return sprintf(reply, "302 RED %llu\n400 ... %llu\n400 PAR %llu\n400 COM %llu\n403 IP %llu\n404 INV %llu\n500 SRV %llu\n", ot_failed_request_counts[0],
438 ot_failed_request_counts[0], ot_failed_request_counts[1], ot_failed_request_counts[2], 427 ot_failed_request_counts[1], ot_failed_request_counts[2], ot_failed_request_counts[3], ot_failed_request_counts[4],
439 ot_failed_request_counts[3], ot_failed_request_counts[4], ot_failed_request_counts[5], 428 ot_failed_request_counts[5], ot_failed_request_counts[6]);
440 ot_failed_request_counts[6] );
441} 429}
442 430
443static size_t stats_return_renew_bucket( char * reply ) { 431static size_t stats_return_renew_bucket(char *reply) {
444 char *r = reply; 432 char *r = reply;
445 int i; 433 int i;
446 434
447 for( i=0; i<OT_PEER_TIMEOUT; ++i ) 435 for (i = 0; i < OT_PEER_TIMEOUT; ++i)
448 r+=sprintf(r,"%02i %llu\n", i, ot_renewed[i] ); 436 r += sprintf(r, "%02i %llu\n", i, ot_renewed[i]);
449 return r - reply; 437 return r - reply;
450} 438}
451 439
452static size_t stats_return_sync_mrtg( char * reply ) { 440static size_t stats_return_sync_mrtg(char *reply) {
453 ot_time t = time( NULL ) - ot_start_time; 441 ot_time t = time(NULL) - ot_start_time;
454 return sprintf( reply, 442 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", ot_overall_sync_count, 0LL, (int)t,
455 "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", 443 (int)(t / 3600), events_per_time(ot_overall_tcp_connections + ot_overall_udp_connections, t),
456 ot_overall_sync_count, 444 events_per_time(ot_overall_tcp_successfulannounces + ot_overall_udp_successfulannounces + ot_overall_udp_connects, t));
457 0LL,
458 (int)t,
459 (int)(t / 3600),
460 events_per_time( ot_overall_tcp_connections+ot_overall_udp_connections, t ),
461 events_per_time( ot_overall_tcp_successfulannounces+ot_overall_udp_successfulannounces+ot_overall_udp_connects, t )
462 );
463} 445}
464 446
465static size_t stats_return_completed_mrtg( char * reply ) { 447static size_t stats_return_completed_mrtg(char *reply) {
466 ot_time t = time( NULL ) - ot_start_time; 448 ot_time t = time(NULL) - ot_start_time;
467 449
468 return sprintf( reply, 450 return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker, %lu completed/h.", ot_overall_completed, 0LL, (int)t, (int)(t / 3600),
469 "%llu\n%llu\n%i seconds (%i hours)\nopentracker, %lu completed/h.", 451 events_per_time(ot_overall_completed, t / 3600));
470 ot_overall_completed,
471 0LL,
472 (int)t,
473 (int)(t / 3600),
474 events_per_time( ot_overall_completed, t / 3600 )
475 );
476} 452}
477 453
478#ifdef WANT_LOG_NUMWANT 454#ifdef WANT_LOG_NUMWANT
479extern unsigned long long numwants[201]; 455extern unsigned long long numwants[201];
480static size_t stats_return_numwants( char * reply ) { 456static size_t stats_return_numwants(char *reply) {
481 char * r = reply; 457 char *r = reply;
482 int i; 458 int i;
483 for( i=0; i<=200; ++i ) 459 for (i = 0; i <= 200; ++i)
484 r += sprintf( r, "%03d => %lld\n", i, numwants[i] ); 460 r += sprintf(r, "%03d => %lld\n", i, numwants[i]);
485 return r-reply; 461 return r - reply;
486} 462}
487#endif 463#endif
488 464
489#ifdef WANT_FULLLOG_NETWORKS 465#ifdef WANT_FULLLOG_NETWORKS
490static void stats_return_fulllog( int *iovec_entries, struct iovec **iovector, char *r ) { 466static void stats_return_fulllog(int *iovec_entries, struct iovec **iovector, char *r) {
491 ot_log *loglist = g_logchain_first, *llnext; 467 ot_log *loglist = g_logchain_first, *llnext;
492 char * re = r + OT_STATS_TMPSIZE; 468 char *re = r + OT_STATS_TMPSIZE;
493 469
494 g_logchain_first = g_logchain_last = 0; 470 g_logchain_first = g_logchain_last = 0;
495 471
496 while( loglist ) { 472 while (loglist) {
497 if( r + ( loglist->size + 64 ) >= re ) { 473 if (r + (loglist->size + 64) >= re) {
498 r = iovec_fix_increase_or_free( iovec_entries, iovector, r, 32 * OT_STATS_TMPSIZE ); 474 r = iovec_fix_increase_or_free(iovec_entries, iovector, r, 32 * OT_STATS_TMPSIZE);
499 if( !r ) return; 475 if (!r)
476 return;
500 re = r + 32 * OT_STATS_TMPSIZE; 477 re = r + 32 * OT_STATS_TMPSIZE;
501 } 478 }
502 r += sprintf( r, "%08ld: ", loglist->time ); 479 r += sprintf(r, "%08ld: ", loglist->time);
503 r += fmt_ip6c( r, loglist->ip ); 480 r += fmt_ip6c(r, loglist->ip);
504 *r++ = '\n'; 481 *r++ = '\n';
505 memcpy( r, loglist->data, loglist->size ); 482 memcpy(r, loglist->data, loglist->size);
506 r += loglist->size; 483 r += loglist->size;
507 *r++ = '\n'; 484 *r++ = '\n';
508 *r++ = '*'; 485 *r++ = '*';
509 *r++ = '\n'; 486 *r++ = '\n';
510 *r++ = '\n'; 487 *r++ = '\n';
511 488
512 llnext = loglist->next; 489 llnext = loglist->next;
513 free( loglist->data ); 490 free(loglist->data);
514 free( loglist ); 491 free(loglist);
515 loglist = llnext; 492 loglist = llnext;
516 } 493 }
517 iovec_fixlast( iovec_entries, iovector, r ); 494 iovec_fixlast(iovec_entries, iovector, r);
518} 495}
519#endif 496#endif
520 497
521static size_t stats_return_everything( char * reply ) { 498static size_t stats_return_everything(char *reply) {
522 torrent_stats stats = {0,0,0}; 499 torrent_stats stats = {0, 0, 0};
523 int i; 500 int i;
524 char * r = reply; 501 char *r = reply;
525 502
526 iterate_all_torrents( torrent_statter, (uintptr_t)&stats ); 503 iterate_all_torrents(torrent_statter, (uintptr_t)&stats);
527 504
528 r += sprintf( r, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" ); 505 r += sprintf(r, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n");
529 r += sprintf( r, "<stats>\n" ); 506 r += sprintf(r, "<stats>\n");
530 r += sprintf( r, " <tracker_id>%" PRIu32 "</tracker_id>\n", g_tracker_id ); 507 r += sprintf(r, " <tracker_id>%" PRIu32 "</tracker_id>\n", g_tracker_id);
531 r += sprintf( r, " <version>\n" ); r += stats_return_tracker_version( r ); r += sprintf( r, " </version>\n" ); 508 r += sprintf(r, " <version>\n");
532 r += sprintf( r, " <uptime>%llu</uptime>\n", (unsigned long long)(time( NULL ) - ot_start_time) ); 509 r += stats_return_tracker_version(r);
533 r += sprintf( r, " <torrents>\n" ); 510 r += sprintf(r, " </version>\n");
534 r += sprintf( r, " <count_mutex>%zd</count_mutex>\n", mutex_get_torrent_count() ); 511 r += sprintf(r, " <uptime>%llu</uptime>\n", (unsigned long long)(time(NULL) - ot_start_time));
535 r += sprintf( r, " <count_iterator>%llu</count_iterator>\n", stats.torrent_count ); 512 r += sprintf(r, " <torrents>\n");
536 r += sprintf( r, " </torrents>\n" ); 513 r += sprintf(r, " <count_mutex>%zd</count_mutex>\n", mutex_get_torrent_count());
537 r += sprintf( r, " <peers>\n <count>%llu</count>\n </peers>\n", stats.peer_count ); 514 r += sprintf(r, " <count_iterator>%llu</count_iterator>\n", stats.torrent_count);
538 r += sprintf( r, " <seeds>\n <count>%llu</count>\n </seeds>\n", stats.seed_count ); 515 r += sprintf(r, " </torrents>\n");
539 r += sprintf( r, " <completed>\n <count>%llu</count>\n </completed>\n", ot_overall_completed ); 516 r += sprintf(r, " <peers>\n <count>%llu</count>\n </peers>\n", stats.peer_count);
540 r += sprintf( r, " <connections>\n" ); 517 r += sprintf(r, " <seeds>\n <count>%llu</count>\n </seeds>\n", stats.seed_count);
541 r += sprintf( r, " <tcp>\n <accept>%llu</accept>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n </tcp>\n", ot_overall_tcp_connections, ot_overall_tcp_successfulannounces, ot_overall_tcp_successfulscrapes ); 518 r += sprintf(r, " <completed>\n <count>%llu</count>\n </completed>\n", ot_overall_completed);
542 r += sprintf( r, " <udp>\n <overall>%llu</overall>\n <connect>%llu</connect>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n <missmatch>%llu</missmatch>\n </udp>\n", ot_overall_udp_connections, ot_overall_udp_connects, ot_overall_udp_successfulannounces, ot_overall_udp_successfulscrapes, ot_overall_udp_connectionidmissmatches ); 519 r += sprintf(r, " <connections>\n");
543 r += sprintf( r, " <livesync>\n <count>%llu</count>\n </livesync>\n", ot_overall_sync_count ); 520 r += sprintf(r, " <tcp>\n <accept>%llu</accept>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n </tcp>\n",
544 r += sprintf( r, " </connections>\n" ); 521 ot_overall_tcp_connections, ot_overall_tcp_successfulannounces, ot_overall_tcp_successfulscrapes);
545 r += sprintf( r, " <debug>\n" ); 522 r += sprintf(r, " <udp>\n <overall>%llu</overall>\n <connect>%llu</connect>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n <missmatch>%llu</missmatch>\n </udp>\n",
546 r += sprintf( r, " <renew>\n" ); 523 ot_overall_udp_connections, ot_overall_udp_connects, ot_overall_udp_successfulannounces, ot_overall_udp_successfulscrapes,
547 for( i=0; i<OT_PEER_TIMEOUT; ++i ) 524 ot_overall_udp_connectionidmissmatches);
548 r += sprintf( r, " <count interval=\"%02i\">%llu</count>\n", i, ot_renewed[i] ); 525 r += sprintf(r, " <livesync>\n <count>%llu</count>\n </livesync>\n", ot_overall_sync_count);
549 r += sprintf( r, " </renew>\n" ); 526 r += sprintf(r, " </connections>\n");
550 r += sprintf( r, " <http_error>\n" ); 527 r += sprintf(r, " <debug>\n");
551 for( i=0; i<CODE_HTTPERROR_COUNT; ++i ) 528 r += sprintf(r, " <renew>\n");
552 r += sprintf( r, " <count code=\"%s\">%llu</count>\n", ot_failed_request_names[i], ot_failed_request_counts[i] ); 529 for (i = 0; i < OT_PEER_TIMEOUT; ++i)
553 r += sprintf( r, " </http_error>\n" ); 530 r += sprintf(r, " <count interval=\"%02i\">%llu</count>\n", i, ot_renewed[i]);
554 r += sprintf( r, " <mutex_stall>\n <count>%llu</count>\n </mutex_stall>\n", ot_overall_stall_count ); 531 r += sprintf(r, " </renew>\n");
555 r += sprintf( r, " </debug>\n" ); 532 r += sprintf(r, " <http_error>\n");
556 r += sprintf( r, "</stats>" ); 533 for (i = 0; i < CODE_HTTPERROR_COUNT; ++i)
534 r += sprintf(r, " <count code=\"%s\">%llu</count>\n", ot_failed_request_names[i], ot_failed_request_counts[i]);
535 r += sprintf(r, " </http_error>\n");
536 r += sprintf(r, " <mutex_stall>\n <count>%llu</count>\n </mutex_stall>\n", ot_overall_stall_count);
537 r += sprintf(r, " </debug>\n");
538 r += sprintf(r, "</stats>");
557 return r - reply; 539 return r - reply;
558} 540}
559 541
560extern const char 542size_t stats_return_tracker_version(char *reply) {
561*g_version_opentracker_c, *g_version_accesslist_c, *g_version_clean_c, *g_version_fullscrape_c, *g_version_http_c, 543#define QUOTE(name) #name
562*g_version_iovec_c, *g_version_mutex_c, *g_version_stats_c, *g_version_udp_c, *g_version_vector_c, 544#define SQUOTE(name) QUOTE(name)
563*g_version_scan_urlencoded_query_c, *g_version_trackerlogic_c, *g_version_livesync_c, *g_version_rijndael_c; 545 return sprintf(reply, "https://erdgeist.org/gitweb/opentracker/commit/?id=" SQUOTE(GIT_VERSION) "\n");
564 546}
565size_t stats_return_tracker_version( char *reply ) { 547
566 return sprintf( reply, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s", 548size_t return_stats_for_tracker(char *reply, int mode, int format) {
567 g_version_opentracker_c, g_version_accesslist_c, g_version_clean_c, g_version_fullscrape_c, g_version_http_c, 549 (void)format;
568 g_version_iovec_c, g_version_mutex_c, g_version_stats_c, g_version_udp_c, g_version_vector_c, 550 switch (mode & TASK_TASK_MASK) {
569 g_version_scan_urlencoded_query_c, g_version_trackerlogic_c, g_version_livesync_c, g_version_rijndael_c ); 551 case TASK_STATS_CONNS:
570} 552 return stats_connections_mrtg(reply);
571 553 case TASK_STATS_SCRAPE:
572size_t return_stats_for_tracker( char *reply, int mode, int format ) { 554 return stats_scrape_mrtg(reply);
573 (void) format; 555 case TASK_STATS_UDP:
574 switch( mode & TASK_TASK_MASK ) { 556 return stats_udpconnections_mrtg(reply);
575 case TASK_STATS_CONNS: 557 case TASK_STATS_TCP:
576 return stats_connections_mrtg( reply ); 558 return stats_tcpconnections_mrtg(reply);
577 case TASK_STATS_SCRAPE: 559 case TASK_STATS_FULLSCRAPE:
578 return stats_scrape_mrtg( reply ); 560 return stats_fullscrapes_mrtg(reply);
579 case TASK_STATS_UDP: 561 case TASK_STATS_COMPLETED:
580 return stats_udpconnections_mrtg( reply ); 562 return stats_return_completed_mrtg(reply);
581 case TASK_STATS_TCP: 563 case TASK_STATS_HTTPERRORS:
582 return stats_tcpconnections_mrtg( reply ); 564 return stats_httperrors_txt(reply);
583 case TASK_STATS_FULLSCRAPE: 565 case TASK_STATS_VERSION:
584 return stats_fullscrapes_mrtg( reply ); 566 return stats_return_tracker_version(reply);
585 case TASK_STATS_COMPLETED: 567 case TASK_STATS_RENEW:
586 return stats_return_completed_mrtg( reply ); 568 return stats_return_renew_bucket(reply);
587 case TASK_STATS_HTTPERRORS: 569 case TASK_STATS_SYNCS:
588 return stats_httperrors_txt( reply ); 570 return stats_return_sync_mrtg(reply);
589 case TASK_STATS_VERSION:
590 return stats_return_tracker_version( reply );
591 case TASK_STATS_RENEW:
592 return stats_return_renew_bucket( reply );
593 case TASK_STATS_SYNCS:
594 return stats_return_sync_mrtg( reply );
595#ifdef WANT_LOG_NUMWANT 571#ifdef WANT_LOG_NUMWANT
596 case TASK_STATS_NUMWANTS: 572 case TASK_STATS_NUMWANTS:
597 return stats_return_numwants( reply ); 573 return stats_return_numwants(reply);
598#endif 574#endif
599 default: 575 default:
600 return 0; 576 return 0;
601 } 577 }
602} 578}
603 579
604static void stats_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ) { 580static void stats_make(int *iovec_entries, struct iovec **iovector, ot_tasktype mode) {
605 char *r; 581 char *r;
606 582
607 *iovec_entries = 0; 583 *iovec_entries = 0;
608 *iovector = NULL; 584 *iovector = NULL;
609 if( !( r = iovec_increase( iovec_entries, iovector, OT_STATS_TMPSIZE ) ) ) 585 if (!(r = iovec_increase(iovec_entries, iovector, OT_STATS_TMPSIZE)))
610 return; 586 return;
611 587
612 switch( mode & TASK_TASK_MASK ) { 588 switch (mode & TASK_TASK_MASK) {
613 case TASK_STATS_TORRENTS: r += stats_torrents_mrtg( r ); break; 589 case TASK_STATS_TORRENTS:
614 case TASK_STATS_PEERS: r += stats_peers_mrtg( r ); break; 590 r += stats_torrents_mrtg(r);
615 case TASK_STATS_SLASH24S: r += stats_slash24s_txt( r, 128 ); break; 591 break;
616 case TASK_STATS_TOP10: r += stats_top_txt( r, 10 ); break; 592 case TASK_STATS_PEERS:
617 case TASK_STATS_TOP100: 593 r += stats_peers_mrtg(r);
618 r = iovec_fix_increase_or_free( iovec_entries, iovector, r, 4 * OT_STATS_TMPSIZE ); 594 break;
619 if( !r ) return; 595 case TASK_STATS_SLASH24S:
620 r += stats_top_txt( r, 100 ); break; 596 r += stats_slash24s_txt(r, 128);
621 case TASK_STATS_EVERYTHING: r += stats_return_everything( r ); break; 597 break;
598 case TASK_STATS_TOP10:
599 r += stats_top_txt(r, 10);
600 break;
601 case TASK_STATS_TOP100:
602 r = iovec_fix_increase_or_free(iovec_entries, iovector, r, 4 * OT_STATS_TMPSIZE);
603 if (!r)
604 return;
605 r += stats_top_txt(r, 100);
606 break;
607 case TASK_STATS_EVERYTHING:
608 r = iovec_fix_increase_or_free(iovec_entries, iovector, r, OT_STATS_TMPSIZE + 64 * OT_PEER_TIMEOUT);
609 if (!r)
610 return;
611 r += stats_return_everything(r);
612 break;
622#ifdef WANT_SPOT_WOODPECKER 613#ifdef WANT_SPOT_WOODPECKER
623 case TASK_STATS_WOODPECKERS: r += stats_return_woodpeckers( r, 128 ); break; 614 case TASK_STATS_WOODPECKERS:
615 r += stats_return_woodpeckers(r, 128);
616 break;
624#endif 617#endif
625#ifdef WANT_FULLLOG_NETWORKS 618#ifdef WANT_FULLLOG_NETWORKS
626 case TASK_STATS_FULLLOG: stats_return_fulllog( iovec_entries, iovector, r ); 619 case TASK_STATS_FULLLOG:
627 return; 620 stats_return_fulllog(iovec_entries, iovector, r);
621 return;
628#endif 622#endif
629 default: 623 default:
630 iovec_free(iovec_entries, iovector); 624 iovec_free(iovec_entries, iovector);
631 return; 625 return;
632 } 626 }
633 iovec_fixlast( iovec_entries, iovector, r ); 627 iovec_fixlast(iovec_entries, iovector, r);
634} 628}
635 629
636void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data ) { 630void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data) {
637 switch( event ) { 631 switch (event) {
638 case EVENT_ACCEPT: 632 case EVENT_ACCEPT:
639 if( proto == FLAG_TCP ) ot_overall_tcp_connections++; else ot_overall_udp_connections++; 633 if (proto == FLAG_TCP)
634 ot_overall_tcp_connections++;
635 else
636 ot_overall_udp_connections++;
640#ifdef WANT_LOG_NETWORKS 637#ifdef WANT_LOG_NETWORKS
641 stat_increase_network_count( &stats_network_counters_root, 0, event_data ); 638 stat_increase_network_count(&stats_network_counters_root, 0, event_data);
642#endif 639#endif
643 break; 640 break;
644 case EVENT_ANNOUNCE: 641 case EVENT_ANNOUNCE:
645 if( proto == FLAG_TCP ) ot_overall_tcp_successfulannounces++; else ot_overall_udp_successfulannounces++; 642 if (proto == FLAG_TCP)
646 break; 643 ot_overall_tcp_successfulannounces++;
647 case EVENT_CONNECT: 644 else
648 if( proto == FLAG_TCP ) ot_overall_tcp_connects++; else ot_overall_udp_connects++; 645 ot_overall_udp_successfulannounces++;
649 break; 646 break;
650 case EVENT_COMPLETED: 647 case EVENT_CONNECT:
648 if (proto == FLAG_TCP)
649 ot_overall_tcp_connects++;
650 else
651 ot_overall_udp_connects++;
652 break;
653 case EVENT_COMPLETED:
651#ifdef WANT_SYSLOGS 654#ifdef WANT_SYSLOGS
652 if( event_data) { 655 if (event_data) {
653 struct ot_workstruct *ws = (struct ot_workstruct *)event_data; 656 struct ot_workstruct *ws = (struct ot_workstruct *)event_data;
654 char timestring[64]; 657 char timestring[64];
655 char hash_hex[42], peerid_hex[42], ip_readable[64]; 658 char hash_hex[42], peerid_hex[42], ip_readable[64];
656 struct tm time_now; 659 struct tm time_now;
657 time_t ttt; 660 time_t ttt;
658 661
659 time( &ttt ); 662 time(&ttt);
660 localtime_r( &ttt, &time_now ); 663 localtime_r(&ttt, &time_now);
661 strftime( timestring, sizeof( timestring ), "%FT%T%z", &time_now ); 664 strftime(timestring, sizeof(timestring), "%FT%T%z", &time_now);
662 665
663 to_hex( hash_hex, *ws->hash ); 666 to_hex(hash_hex, *ws->hash);
664 if( ws->peer_id ) 667 if (ws->peer_id)
665 to_hex( peerid_hex, (uint8_t*)ws->peer_id ); 668 to_hex(peerid_hex, (uint8_t *)ws->peer_id);
666 else { 669 else {
667 *peerid_hex=0; 670 *peerid_hex = 0;
668 } 671 }
669 672
670#ifdef WANT_V6 673 ip_readable[fmt_ip6c(ip_readable, (char *)&ws->peer)] = 0;
671 ip_readable[ fmt_ip6c( ip_readable, (char*)&ws->peer ) ] = 0; 674#if 0
672#else 675 /* XXX */
673 ip_readable[ fmt_ip4( ip_readable, (char*)&ws->peer ) ] = 0; 676 ip_readable[ fmt_ip4( ip_readable, (char*)&ws->peer ) ] = 0;
674#endif 677#endif
675 syslog( LOG_INFO, "time=%s event=completed info_hash=%s peer_id=%s ip=%s", timestring, hash_hex, peerid_hex, ip_readable ); 678 syslog(LOG_INFO, "time=%s event=completed info_hash=%s peer_id=%s ip=%s", timestring, hash_hex, peerid_hex, ip_readable);
676 }
677#endif
678 ot_overall_completed++;
679 break;
680 case EVENT_SCRAPE:
681 if( proto == FLAG_TCP ) ot_overall_tcp_successfulscrapes++; else ot_overall_udp_successfulscrapes++;
682 break;
683 case EVENT_FULLSCRAPE:
684 ot_full_scrape_count++;
685 ot_full_scrape_size += event_data;
686 break;
687 case EVENT_FULLSCRAPE_REQUEST:
688 {
689 ot_ip6 *ip = (ot_ip6*)event_data; /* ugly hack to transfer ip to stats */
690 char _debug[512];
691 int off = snprintf( _debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time)/60 );
692 off += fmt_ip6c( _debug+off, *ip );
693 off += snprintf( _debug+off, sizeof(_debug)-off, " - FULL SCRAPE\n" );
694 write( 2, _debug, off );
695 ot_full_scrape_request_count++;
696 } 679 }
697 break; 680#endif
698 case EVENT_FULLSCRAPE_REQUEST_GZIP: 681 ot_overall_completed++;
699 { 682 break;
700 ot_ip6 *ip = (ot_ip6*)event_data; /* ugly hack to transfer ip to stats */ 683 case EVENT_SCRAPE:
701 char _debug[512]; 684 if (proto == FLAG_TCP)
702 int off = snprintf( _debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time)/60 ); 685 ot_overall_tcp_successfulscrapes++;
703 off += fmt_ip6c(_debug+off, *ip ); 686 else
704 off += snprintf( _debug+off, sizeof(_debug)-off, " - FULL SCRAPE\n" ); 687 ot_overall_udp_successfulscrapes++;
705 write( 2, _debug, off ); 688 break;
706 ot_full_scrape_request_count++; 689 case EVENT_FULLSCRAPE:
707 } 690 ot_full_scrape_count++;
708 break; 691 ot_full_scrape_size += event_data;
709 case EVENT_FAILED: 692 break;
710 ot_failed_request_counts[event_data]++; 693 case EVENT_FULLSCRAPE_REQUEST: {
711 break; 694 ot_ip6 *ip = (ot_ip6 *)event_data; /* ugly hack to transfer ip to stats */
712 case EVENT_RENEW: 695 char _debug[512];
713 ot_renewed[event_data]++; 696 int off = snprintf(_debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time) / 60);
714 break; 697 off += fmt_ip6c(_debug + off, *ip);
715 case EVENT_SYNC: 698 off += snprintf(_debug + off, sizeof(_debug) - off, " - FULL SCRAPE\n");
716 ot_overall_sync_count+=event_data; 699 (void)write(2, _debug, off);
717 break; 700 ot_full_scrape_request_count++;
718 case EVENT_BUCKET_LOCKED: 701 } break;
719 ot_overall_stall_count++; 702 case EVENT_FULLSCRAPE_REQUEST_GZIP: {
720 break; 703 ot_ip6 *ip = (ot_ip6 *)event_data; /* ugly hack to transfer ip to stats */
704 char _debug[512];
705 int off = snprintf(_debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time) / 60);
706 off += fmt_ip6c(_debug + off, *ip);
707 off += snprintf(_debug + off, sizeof(_debug) - off, " - FULL SCRAPE\n");
708 (void)write(2, _debug, off);
709 ot_full_scrape_request_count++;
710 } break;
711 case EVENT_FAILED:
712 ot_failed_request_counts[event_data]++;
713 break;
714 case EVENT_RENEW:
715 ot_renewed[event_data]++;
716 break;
717 case EVENT_SYNC:
718 ot_overall_sync_count += event_data;
719 break;
720 case EVENT_BUCKET_LOCKED:
721 ot_overall_stall_count++;
722 break;
721#ifdef WANT_SPOT_WOODPECKER 723#ifdef WANT_SPOT_WOODPECKER
722 case EVENT_WOODPECKER: 724 case EVENT_WOODPECKER:
723 pthread_mutex_lock( &g_woodpeckers_mutex ); 725 pthread_mutex_lock(&g_woodpeckers_mutex);
724 stat_increase_network_count( &stats_woodpeckers_tree, 0, event_data ); 726 stat_increase_network_count(&stats_woodpeckers_tree, 0, event_data);
725 pthread_mutex_unlock( &g_woodpeckers_mutex ); 727 pthread_mutex_unlock(&g_woodpeckers_mutex);
726 break; 728 break;
727#endif 729#endif
728 case EVENT_CONNID_MISSMATCH: 730 case EVENT_CONNID_MISSMATCH:
729 ++ot_overall_udp_connectionidmissmatches; 731 ++ot_overall_udp_connectionidmissmatches;
730 default: 732 default:
731 break; 733 break;
732 } 734 }
733} 735}
734 736
735void stats_cleanup() { 737void stats_cleanup() {
736#ifdef WANT_SPOT_WOODPECKER 738#ifdef WANT_SPOT_WOODPECKER
737 pthread_mutex_lock( &g_woodpeckers_mutex ); 739 pthread_mutex_lock(&g_woodpeckers_mutex);
738 stats_shift_down_network_count( &stats_woodpeckers_tree, 0, 1 ); 740 stats_shift_down_network_count(&stats_woodpeckers_tree, 0, 1);
739 pthread_mutex_unlock( &g_woodpeckers_mutex ); 741 pthread_mutex_unlock(&g_woodpeckers_mutex);
740#endif 742#endif
741} 743}
742 744
743static void * stats_worker( void * args ) { 745static void *stats_worker(void *args) {
744 int iovec_entries; 746 int iovec_entries;
745 struct iovec *iovector; 747 struct iovec *iovector;
746 748
747 (void) args; 749 (void)args;
748 750
749 while( 1 ) { 751 while (1) {
750 ot_tasktype tasktype = TASK_STATS; 752 ot_tasktype tasktype = TASK_STATS;
751 ot_taskid taskid = mutex_workqueue_poptask( &tasktype ); 753 ot_taskid taskid = mutex_workqueue_poptask(&tasktype);
752 stats_make( &iovec_entries, &iovector, tasktype ); 754 stats_make(&iovec_entries, &iovector, tasktype);
753 if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) ) 755 if (mutex_workqueue_pushresult(taskid, iovec_entries, iovector))
754 iovec_free( &iovec_entries, &iovector ); 756 iovec_free(&iovec_entries, &iovector);
755 } 757 }
756 return NULL; 758 return NULL;
757} 759}
758 760
759void stats_deliver( int64 sock, int tasktype ) { 761void stats_deliver(int64 sock, int tasktype) { mutex_workqueue_pushtask(sock, tasktype); }
760 mutex_workqueue_pushtask( sock, tasktype );
761}
762 762
763static pthread_t thread_id; 763static pthread_t thread_id;
764void stats_init( ) { 764void stats_init() {
765 ot_start_time = g_now_seconds; 765 ot_start_time = g_now_seconds;
766 pthread_create( &thread_id, NULL, stats_worker, NULL ); 766 pthread_create(&thread_id, NULL, stats_worker, NULL);
767} 767}
768 768
769void stats_deinit( ) { 769void stats_deinit() {
770 pthread_cancel( thread_id ); 770 pthread_cancel(thread_id);
771} 771}
772
773const char *g_version_stats_c = "$Source$: $Revision$\n";
diff --git a/ot_stats.h b/ot_stats.h
index 6a2515b..8ed2b1e 100644
--- a/ot_stats.h
+++ b/ot_stats.h
@@ -6,10 +6,12 @@
6#ifndef OT_STATS_H__ 6#ifndef OT_STATS_H__
7#define OT_STATS_H__ 7#define OT_STATS_H__
8 8
9#include "trackerlogic.h"
10
9typedef enum { 11typedef enum {
10 EVENT_ACCEPT, 12 EVENT_ACCEPT,
11 EVENT_READ, 13 EVENT_READ,
12 EVENT_CONNECT, /* UDP only */ 14 EVENT_CONNECT, /* UDP only */
13 EVENT_ANNOUNCE, 15 EVENT_ANNOUNCE,
14 EVENT_COMPLETED, 16 EVENT_COMPLETED,
15 EVENT_RENEW, 17 EVENT_RENEW,
@@ -17,7 +19,8 @@ typedef enum {
17 EVENT_SCRAPE, 19 EVENT_SCRAPE,
18 EVENT_FULLSCRAPE_REQUEST, 20 EVENT_FULLSCRAPE_REQUEST,
19 EVENT_FULLSCRAPE_REQUEST_GZIP, 21 EVENT_FULLSCRAPE_REQUEST_GZIP,
20 EVENT_FULLSCRAPE, /* TCP only */ 22 EVENT_FULLSCRAPE_REQUEST_ZSTD,
23 EVENT_FULLSCRAPE, /* TCP only */
21 EVENT_FAILED, 24 EVENT_FAILED,
22 EVENT_BUCKET_LOCKED, 25 EVENT_BUCKET_LOCKED,
23 EVENT_WOODPECKER, 26 EVENT_WOODPECKER,
@@ -38,15 +41,12 @@ enum {
38 CODE_HTTPERROR_COUNT 41 CODE_HTTPERROR_COUNT
39}; 42};
40 43
41void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data ); 44void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data);
42void stats_deliver( int64 sock, int tasktype ); 45void stats_deliver(int64 sock, int tasktype);
43void stats_cleanup(); 46void stats_cleanup(void);
44size_t return_stats_for_tracker( char *reply, int mode, int format ); 47size_t return_stats_for_tracker(char *reply, int mode, int format);
45size_t stats_return_tracker_version( char *reply ); 48size_t stats_return_tracker_version(char *reply);
46void stats_init( ); 49void stats_init(void);
47void stats_deinit( ); 50void stats_deinit(void);
48
49extern const char *g_version_rijndael_c;
50extern const char *g_version_livesync_c;
51 51
52#endif 52#endif
diff --git a/ot_sync.c b/ot_sync.c
index cd66a46..293acf3 100644
--- a/ot_sync.c
+++ b/ot_sync.c
@@ -4,64 +4,66 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <sys/types.h> 7#include <pthread.h>
8#include <sys/mman.h>
9#include <sys/uio.h>
10#include <stdio.h> 8#include <stdio.h>
11#include <string.h> 9#include <string.h>
12#include <pthread.h> 10#include <sys/mman.h>
11#include <sys/types.h>
12#include <sys/uio.h>
13 13
14/* Libowfat */ 14/* Libowfat */
15#include "scan.h"
16#include "byte.h" 15#include "byte.h"
17#include "io.h" 16#include "io.h"
17#include "scan.h"
18 18
19/* Opentracker */ 19/* Opentracker */
20#include "trackerlogic.h" 20#include "ot_iovec.h"
21#include "ot_mutex.h" 21#include "ot_mutex.h"
22#include "ot_sync.h"
23#include "ot_stats.h" 22#include "ot_stats.h"
24#include "ot_iovec.h" 23#include "ot_sync.h"
24#include "trackerlogic.h"
25 25
26#ifdef WANT_SYNC_BATCH 26#ifdef WANT_SYNC_BATCH
27 27
28#define OT_SYNC_CHUNK_SIZE (512*1024) 28#define OT_SYNC_CHUNK_SIZE (512 * 1024)
29 29
30/* Import Changeset from an external authority 30/* Import Changeset from an external authority
31 format: d4:syncd[..]ee 31 format: d4:syncd[..]ee
32 [..]: ( 20:01234567890abcdefghij16:XXXXYYYY )+ 32 [..]: ( 20:01234567890abcdefghij16:XXXXYYYY )+
33*/ 33*/
34int add_changeset_to_tracker( uint8_t *data, size_t len ) { 34int add_changeset_to_tracker(uint8_t *data, size_t len) {
35 ot_hash *hash; 35 ot_hash *hash;
36 uint8_t *end = data + len; 36 uint8_t *end = data + len;
37 unsigned long peer_count; 37 unsigned long peer_count;
38 38
39 /* We do know, that the string is \n terminated, so it cant 39 /* We do know, that the string is \n terminated, so it cant
40 overflow */ 40 overflow */
41 if( byte_diff( data, 8, "d4:syncd" ) ) return -1; 41 if (byte_diff(data, 8, "d4:syncd"))
42 return -1;
42 data += 8; 43 data += 8;
43 44
44 while( 1 ) { 45 while (1) {
45 if( byte_diff( data, 3, "20:" ) ) { 46 if (byte_diff(data, 3, "20:")) {
46 if( byte_diff( data, 2, "ee" ) ) 47 if (byte_diff(data, 2, "ee"))
47 return -1; 48 return -1;
48 return 0; 49 return 0;
49 } 50 }
50 data += 3; 51 data += 3;
51 hash = (ot_hash*)data; 52 hash = (ot_hash *)data;
52 data += sizeof( ot_hash ); 53 data += sizeof(ot_hash);
53 54
54 /* Scan string length indicator */ 55 /* Scan string length indicator */
55 data += ( len = scan_ulong( (char*)data, &peer_count ) ); 56 data += (len = scan_ulong((char *)data, &peer_count));
56 57
57 /* If no long was scanned, it is not divisible by 8, it is not 58 /* If no long was scanned, it is not divisible by 8, it is not
58 followed by a colon or claims to need to much memory, we fail */ 59 followed by a colon or claims to need to much memory, we fail */
59 if( !len || !peer_count || ( peer_count & 7 ) || ( *data++ != ':' ) || ( data + peer_count > end ) ) 60 if (!len || !peer_count || (peer_count & 7) || (*data++ != ':') || (data + peer_count > end))
60 return -1; 61 return -1;
61 62
62 while( peer_count > 0 ) { 63 while (peer_count > 0) {
63 add_peer_to_torrent( hash, (ot_peer*)data, 1 ); 64 add_peer_to_torrent(hash, (ot_peer *)data, 1);
64 data += 8; peer_count -= 8; 65 data += 8;
66 peer_count -= 8;
65 } 67 }
66 } 68 }
67 return 0; 69 return 0;
@@ -70,80 +72,86 @@ int add_changeset_to_tracker( uint8_t *data, size_t len ) {
70/* Proposed output format 72/* Proposed output format
71 d4:syncd20:<info_hash>8*N:(xxxxyyyy)*Nee 73 d4:syncd20:<info_hash>8*N:(xxxxyyyy)*Nee
72*/ 74*/
73static void sync_make( int *iovec_entries, struct iovec **iovector ) { 75static void sync_make(int *iovec_entries, struct iovec **iovector) {
74 int bucket; 76 int bucket;
75 char *r, *re; 77 char *r, *re;
76 78
77 /* Setup return vector... */ 79 /* Setup return vector... */
78 *iovec_entries = 0; 80 *iovec_entries = 0;
79 *iovector = NULL; 81 *iovector = NULL;
80 if( !( r = iovec_increase( iovec_entries, iovector, OT_SYNC_CHUNK_SIZE ) ) ) 82 if (!(r = iovec_increase(iovec_entries, iovector, OT_SYNC_CHUNK_SIZE)))
81 return; 83 return;
82 84
83 /* ... and pointer to end of current output buffer. 85 /* ... and pointer to end of current output buffer.
84 This works as a low watermark */ 86 This works as a low watermark */
85 re = r + OT_SYNC_CHUNK_SIZE; 87 re = r + OT_SYNC_CHUNK_SIZE;
86 88
87 memmove( r, "d4:syncd", 8 ); r += 8; 89 memmove(r, "d4:syncd", 8);
90 r += 8;
88 91
89 /* For each bucket... */ 92 /* For each bucket... */
90 for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 93 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
91 /* Get exclusive access to that bucket */ 94 /* Get exclusive access to that bucket */
92 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 95 ot_vector *torrents_list = mutex_bucket_lock(bucket);
93 size_t tor_offset; 96 size_t tor_offset;
94 97
95 /* For each torrent in this bucket.. */ 98 /* For each torrent in this bucket.. */
96 for( tor_offset=0; tor_offset<torrents_list->size; ++tor_offset ) { 99 for (tor_offset = 0; tor_offset < torrents_list->size; ++tor_offset) {
97 /* Address torrents members */ 100 /* Address torrents members */
98 ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[tor_offset] ).peer_list; 101 ot_peerlist *peer_list = (((ot_torrent *)(torrents_list->data))[tor_offset]).peer_list;
99 ot_hash *hash =&( ((ot_torrent*)(torrents_list->data))[tor_offset] ).hash; 102 ot_hash *hash = &(((ot_torrent *)(torrents_list->data))[tor_offset]).hash;
100 const size_t byte_count = sizeof(ot_peer) * peer_list->changeset.size; 103 const size_t byte_count = sizeof(ot_peer) * peer_list->changeset.size;
101 104
102 /* If we reached our low watermark in buffer... */ 105 /* If we reached our low watermark in buffer... */
103 if( re - r <= (ssize_t)(/* strlen( "20:" ) == */ 3 + sizeof( ot_hash ) + /* strlen_max( "%zd" ) == */ 12 + byte_count ) ) { 106 if (re - r <= (ssize_t)(/* strlen( "20:" ) == */ 3 + sizeof(ot_hash) + /* strlen_max( "%zd" ) == */ 12 + byte_count)) {
104 107
105 /* Allocate a fresh output buffer at the end of our buffers list 108 /* Allocate a fresh output buffer at the end of our buffers list
106 release bucket and return, if that fails */ 109 release bucket and return, if that fails */
107 if( !( r = iovec_fix_increase_or_free( iovec_entries, iovector, r, OT_SYNC_CHUNK_SIZE ) ) ) 110 if (!(r = iovec_fix_increase_or_free(iovec_entries, iovector, r, OT_SYNC_CHUNK_SIZE)))
108 return mutex_bucket_unlock( bucket ); 111 return mutex_bucket_unlock(bucket);
109 112
110 /* Adjust new end of output buffer */ 113 /* Adjust new end of output buffer */
111 re = r + OT_SYNC_CHUNK_SIZE; 114 re = r + OT_SYNC_CHUNK_SIZE;
112 } 115 }
113 116
114 *r++ = '2'; *r++ = '0'; *r++ = ':'; 117 *r++ = '2';
115 memmove( r, hash, sizeof( ot_hash ) ); r += sizeof( ot_hash ); 118 *r++ = '0';
116 r += sprintf( r, "%zd:", byte_count ); 119 *r++ = ':';
117 memmove( r, peer_list->changeset.data, byte_count ); r += byte_count; 120 memmove(r, hash, sizeof(ot_hash));
121 r += sizeof(ot_hash);
122 r += sprintf(r, "%zd:", byte_count);
123 memmove(r, peer_list->changeset.data, byte_count);
124 r += byte_count;
118 } 125 }
119 126
120 /* All torrents done: release lock on currenct bucket */ 127 /* All torrents done: release lock on currenct bucket */
121 mutex_bucket_unlock( bucket ); 128 mutex_bucket_unlock(bucket);
122 } 129 }
123 130
124 /* Close bencoded sync dictionary */ 131 /* Close bencoded sync dictionary */
125 *r++='e'; *r++='e'; 132 *r++ = 'e';
133 *r++ = 'e';
126 134
127 /* Release unused memory in current output buffer */ 135 /* Release unused memory in current output buffer */
128 iovec_fixlast( iovec_entries, iovector, r ); 136 iovec_fixlast(iovec_entries, iovector, r);
129} 137}
130 138
131/* This is the entry point into this worker thread 139/* This is the entry point into this worker thread
132 It grabs tasks from mutex_tasklist and delivers results back 140 It grabs tasks from mutex_tasklist and delivers results back
133*/ 141*/
134static void * sync_worker( void * args) { 142static void *sync_worker(void *args) {
135 int iovec_entries; 143 int iovec_entries;
136 struct iovec *iovector; 144 struct iovec *iovector;
137 145
138 args = args; 146 args = args;
139 147
140 while( 1 ) { 148 while (1) {
141 ot_tasktype tasktype = TASK_SYNC_OUT; 149 ot_tasktype tasktype = TASK_SYNC_OUT;
142 ot_taskid taskid = mutex_workqueue_poptask( &tasktype ); 150 ot_taskid taskid = mutex_workqueue_poptask(&tasktype);
143 sync_make( &iovec_entries, &iovector ); 151 sync_make(&iovec_entries, &iovector);
144 stats_issue_event( EVENT_SYNC_OUT, FLAG_TCP, iovec_length( &iovec_entries, &iovector) ); 152 stats_issue_event(EVENT_SYNC_OUT, FLAG_TCP, iovec_length(&iovec_entries, &iovector));
145 if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) ) 153 if (mutex_workqueue_pushresult(taskid, iovec_entries, iovector))
146 iovec_free( &iovec_entries, &iovector ); 154 iovec_free(&iovec_entries, &iovector);
147 } 155 }
148 return NULL; 156 return NULL;
149} 157}
@@ -162,5 +170,3 @@ void sync_deliver( int64 socket ) {
162} 170}
163 171
164#endif 172#endif
165
166const char *g_version_sync_c = "$Source$: $Revision$\n";
diff --git a/ot_sync.h b/ot_sync.h
index 441010d..0fe0739 100644
--- a/ot_sync.h
+++ b/ot_sync.h
@@ -9,11 +9,11 @@
9#ifdef WANT_SYNC_BATCH 9#ifdef WANT_SYNC_BATCH
10enum { SYNC_IN, SYNC_OUT }; 10enum { SYNC_IN, SYNC_OUT };
11 11
12void sync_init( ); 12void sync_init();
13void sync_deinit( ); 13void sync_deinit();
14void sync_deliver( int64 socket ); 14void sync_deliver(int64 socket);
15 15
16int add_changeset_to_tracker( uint8_t *data, size_t len ); 16int add_changeset_to_tracker(uint8_t *data, size_t len);
17#else 17#else
18 18
19#define sync_init() 19#define sync_init()
diff --git a/ot_udp.c b/ot_udp.c
index 3bf311c..97ccd38 100644
--- a/ot_udp.c
+++ b/ot_udp.c
@@ -4,212 +4,233 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <stdlib.h>
8#include <pthread.h>
9#include <string.h>
10#include <arpa/inet.h> 7#include <arpa/inet.h>
8#include <pthread.h>
11#include <stdio.h> 9#include <stdio.h>
10#include <stdlib.h>
11#include <string.h>
12 12
13/* Libowfat */ 13/* Libowfat */
14#include "socket.h"
15#include "io.h" 14#include "io.h"
15#include "ip6.h"
16#include "socket.h"
16 17
17/* Opentracker */ 18/* Opentracker */
18#include "trackerlogic.h"
19#include "ot_udp.h"
20#include "ot_stats.h"
21#include "ot_rijndael.h" 19#include "ot_rijndael.h"
20#include "ot_stats.h"
21#include "ot_udp.h"
22#include "trackerlogic.h"
22 23
23#if 0 24#if 0
24static const uint8_t g_static_connid[8] = { 0x23, 0x42, 0x05, 0x17, 0xde, 0x41, 0x50, 0xff }; 25static const uint8_t g_static_connid[8] = { 0x23, 0x42, 0x05, 0x17, 0xde, 0x41, 0x50, 0xff };
25#endif 26#endif
26static uint32_t g_rijndael_round_key[44] = {0}; 27static uint32_t g_rijndael_round_key[44] = {0};
27static uint32_t g_key_of_the_hour[2] = {0}; 28static uint32_t g_key_of_the_hour[2] = {0};
28static ot_time g_hour_of_the_key; 29static ot_time g_hour_of_the_key;
29 30
30static void udp_generate_rijndael_round_key() { 31static void udp_generate_rijndael_round_key() {
31 uint32_t key[16]; 32 uint32_t key[16];
33#ifdef WANT_ARC4RANDOM
34 arc4random_buf(&key[0], sizeof(key));
35#else
32 key[0] = random(); 36 key[0] = random();
33 key[1] = random(); 37 key[1] = random();
34 key[2] = random(); 38 key[2] = random();
35 key[3] = random(); 39 key[3] = random();
36 rijndaelKeySetupEnc128( g_rijndael_round_key, (uint8_t*)key ); 40#endif
41 rijndaelKeySetupEnc128(g_rijndael_round_key, (uint8_t *)key);
37 42
43#ifdef WANT_ARC4RANDOM
44 g_key_of_the_hour[0] = arc4random();
45#else
38 g_key_of_the_hour[0] = random(); 46 g_key_of_the_hour[0] = random();
47#endif
39 g_hour_of_the_key = g_now_minutes; 48 g_hour_of_the_key = g_now_minutes;
40} 49}
41 50
42/* Generate current and previous connection id for ip */ 51/* Generate current and previous connection id for ip */
43static void udp_make_connectionid( uint32_t connid[2], const ot_ip6 remoteip, int age ) { 52static void udp_make_connectionid(uint32_t connid[2], const ot_ip6 remoteip, int age) {
44 uint32_t plain[4], crypt[4]; 53 uint32_t plain[4], crypt[4];
45 int i; 54 int i;
46 if( g_now_minutes + 60 > g_hour_of_the_key ) { 55 if (g_now_minutes + 60 > g_hour_of_the_key) {
47 g_hour_of_the_key = g_now_minutes; 56 g_hour_of_the_key = g_now_minutes;
48 g_key_of_the_hour[1] = g_key_of_the_hour[0]; 57 g_key_of_the_hour[1] = g_key_of_the_hour[0];
58#ifdef WANT_ARC4RANDOM
59 g_key_of_the_hour[0] = arc4random();
60#else
49 g_key_of_the_hour[0] = random(); 61 g_key_of_the_hour[0] = random();
62#endif
50 } 63 }
51 64
52 memcpy( plain, remoteip, sizeof( plain ) ); 65 memcpy(plain, remoteip, sizeof(plain));
53 for( i=0; i<4; ++i ) plain[i] ^= g_key_of_the_hour[age]; 66 for (i = 0; i < 4; ++i)
54 rijndaelEncrypt128( g_rijndael_round_key, (uint8_t*)remoteip, (uint8_t*)crypt ); 67 plain[i] ^= g_key_of_the_hour[age];
68 rijndaelEncrypt128(g_rijndael_round_key, (uint8_t *)remoteip, (uint8_t *)crypt);
55 connid[0] = crypt[0] ^ crypt[1]; 69 connid[0] = crypt[0] ^ crypt[1];
56 connid[1] = crypt[2] ^ crypt[3]; 70 connid[1] = crypt[2] ^ crypt[3];
57} 71}
58 72
59/* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */ 73/* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */
60int handle_udp6( int64 serversocket, struct ot_workstruct *ws ) { 74int handle_udp6(int64 serversocket, struct ot_workstruct *ws) {
61 ot_ip6 remoteip; 75 ot_ip6 remoteip;
62 uint32_t *inpacket = (uint32_t*)ws->inbuf; 76 uint32_t *inpacket = (uint32_t *)ws->inbuf;
63 uint32_t *outpacket = (uint32_t*)ws->outbuf; 77 uint32_t *outpacket = (uint32_t *)ws->outbuf;
64 uint32_t numwant, left, event, scopeid; 78 uint32_t left, event, scopeid;
65 uint32_t connid[2]; 79 uint32_t connid[2];
66 uint32_t action; 80 uint32_t action;
67 uint16_t port, remoteport; 81 uint16_t port, remoteport;
68 size_t byte_count, scrape_count; 82 size_t byte_count, scrape_count;
69 83
70 byte_count = socket_recv6( serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid ); 84 byte_count = socket_recv6(serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid);
71 if( !byte_count ) return 0; 85 if (!byte_count)
72 86 return 0;
73 stats_issue_event( EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip ); 87
74 stats_issue_event( EVENT_READ, FLAG_UDP, byte_count ); 88 stats_issue_event(EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip);
89 stats_issue_event(EVENT_READ, FLAG_UDP, byte_count);
75 90
76 /* Minimum udp tracker packet size, also catches error */ 91 /* Minimum udp tracker packet size, also catches error */
77 if( byte_count < 16 ) 92 if (byte_count < 16)
78 return 1; 93 return 1;
79 94
80 /* Get action to take. Ignore error messages and broken packets */ 95 /* Get action to take. Ignore error messages and broken packets */
81 action = ntohl( inpacket[2] ); 96 action = ntohl(inpacket[2]);
82 if( action > 2 ) 97 if (action > 2)
83 return 1; 98 return 1;
84 99
85 /* Generate the connection id we give out and expect to and from 100 /* Generate the connection id we give out and expect to and from
86 the requesting ip address, this prevents udp spoofing */ 101 the requesting ip address, this prevents udp spoofing */
87 udp_make_connectionid( connid, remoteip, 0 ); 102 udp_make_connectionid(connid, remoteip, 0);
88 103
89 /* Initialise hash pointer */ 104 /* Initialise hash pointer */
90 ws->hash = NULL; 105 ws->hash = NULL;
91 ws->peer_id = NULL; 106 ws->peer_id = NULL;
92 107
93 /* If action is not 0 (connect), then we expect the derived 108 /* If action is not 0 (connect), then we expect the derived
94 connection id in first 64 bit */ 109 connection id in first 64 bit */
95 if( ( action > 0 ) && ( inpacket[0] != connid[0] || inpacket[1] != connid[1] ) ) { 110 if ((action > 0) && (inpacket[0] != connid[0] || inpacket[1] != connid[1])) {
96 /* If connection id does not match, try the one that was 111 /* If connection id does not match, try the one that was
97 valid in the previous hour. Only if this also does not 112 valid in the previous hour. Only if this also does not
98 match, return an error packet */ 113 match, return an error packet */
99 udp_make_connectionid( connid, remoteip, 1 ); 114 udp_make_connectionid(connid, remoteip, 1);
100 if( inpacket[0] != connid[0] || inpacket[1] != connid[1] ) { 115 if (inpacket[0] != connid[0] || inpacket[1] != connid[1]) {
101 const size_t s = sizeof( "Connection ID missmatch." ); 116 const size_t s = sizeof("Connection ID missmatch.");
102 outpacket[0] = htonl( 3 ); outpacket[1] = inpacket[3]; 117 outpacket[0] = htonl(3);
103 memcpy( &outpacket[2], "Connection ID missmatch.", s ); 118 outpacket[1] = inpacket[3];
104 socket_send6( serversocket, ws->outbuf, 8 + s, remoteip, remoteport, 0 ); 119 memcpy(&outpacket[2], "Connection ID missmatch.", s);
105 stats_issue_event( EVENT_CONNID_MISSMATCH, FLAG_UDP, 8 + s ); 120 socket_send6(serversocket, ws->outbuf, 8 + s, remoteip, remoteport, 0);
121 stats_issue_event(EVENT_CONNID_MISSMATCH, FLAG_UDP, 8 + s);
106 return 1; 122 return 1;
107 } 123 }
108 } 124 }
109 125
110 switch( action ) { 126 switch (action) {
111 case 0: /* This is a connect action */ 127 case 0: /* This is a connect action */
112 /* look for udp bittorrent magic id */ 128 /* look for udp bittorrent magic id */
113 if( (ntohl(inpacket[0]) != 0x00000417) || (ntohl(inpacket[1]) != 0x27101980) ) 129 if ((ntohl(inpacket[0]) != 0x00000417) || (ntohl(inpacket[1]) != 0x27101980))
114 return 1; 130 return 1;
131
132 outpacket[0] = 0;
133 outpacket[1] = inpacket[3];
134 outpacket[2] = connid[0];
135 outpacket[3] = connid[1];
136
137 socket_send6(serversocket, ws->outbuf, 16, remoteip, remoteport, 0);
138 stats_issue_event(EVENT_CONNECT, FLAG_UDP, 16);
139 break;
140 case 1: /* This is an announce action */
141 /* Minimum udp announce packet size */
142 if (byte_count < 98)
143 return 1;
144
145 /* We do only want to know, if it is zero */
146 left = inpacket[64 / 4] | inpacket[68 / 4];
147
148 event = ntohl(inpacket[80 / 4]);
149 port = *(uint16_t *)(((char *)inpacket) + 96);
150 ws->hash = (ot_hash *)(((char *)inpacket) + 16);
115 151
116 outpacket[0] = 0; 152 OT_SETIP(ws->peer, remoteip);
117 outpacket[1] = inpacket[3]; 153 OT_SETPORT(ws->peer, &port);
118 outpacket[2] = connid[0]; 154 OT_PEERFLAG(ws->peer) = 0;
119 outpacket[3] = connid[1];
120 155
121 socket_send6( serversocket, ws->outbuf, 16, remoteip, remoteport, 0 ); 156 switch (event) {
122 stats_issue_event( EVENT_CONNECT, FLAG_UDP, 16 ); 157 case 1:
158 OT_PEERFLAG(ws->peer) |= PEER_FLAG_COMPLETED;
123 break; 159 break;
124 case 1: /* This is an announce action */ 160 case 3:
125 /* Minimum udp announce packet size */ 161 OT_PEERFLAG(ws->peer) |= PEER_FLAG_STOPPED;
126 if( byte_count < 98 )
127 return 1;
128
129 /* We do only want to know, if it is zero */
130 left = inpacket[64/4] | inpacket[68/4];
131
132 /* Limit amount of peers to 200 */
133 numwant = ntohl( inpacket[92/4] );
134 if (numwant > 200) numwant = 200;
135
136 event = ntohl( inpacket[80/4] );
137 port = *(uint16_t*)( ((char*)inpacket) + 96 );
138 ws->hash = (ot_hash*)( ((char*)inpacket) + 16 );
139
140 OT_SETIP( &ws->peer, remoteip );
141 OT_SETPORT( &ws->peer, &port );
142 OT_PEERFLAG( &ws->peer ) = 0;
143
144 switch( event ) {
145 case 1: OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_COMPLETED; break;
146 case 3: OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_STOPPED; break;
147 default: break;
148 }
149
150 if( !left )
151 OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_SEEDING;
152
153 outpacket[0] = htonl( 1 ); /* announce action */
154 outpacket[1] = inpacket[12/4];
155
156 if( OT_PEERFLAG( &ws->peer ) & PEER_FLAG_STOPPED ) { /* Peer is gone. */
157 ws->reply = ws->outbuf;
158 ws->reply_size = remove_peer_from_torrent( FLAG_UDP, ws );
159 } else {
160 ws->reply = ws->outbuf + 8;
161 ws->reply_size = 8 + add_peer_to_torrent_and_return_peers( FLAG_UDP, ws, numwant );
162 }
163
164 socket_send6( serversocket, ws->outbuf, ws->reply_size, remoteip, remoteport, 0 );
165 stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, ws->reply_size );
166 break; 162 break;
163 default:
164 break;
165 }
167 166
168 case 2: /* This is a scrape action */ 167 if (!left)
169 outpacket[0] = htonl( 2 ); /* scrape action */ 168 OT_PEERFLAG(ws->peer) |= PEER_FLAG_SEEDING;
170 outpacket[1] = inpacket[12/4];
171 169
172 for( scrape_count = 0; ( scrape_count * 20 < byte_count - 16) && ( scrape_count <= 74 ); scrape_count++ ) 170 outpacket[0] = htonl(1); /* announce action */
173 return_udp_scrape_for_torrent( *(ot_hash*)( ((char*)inpacket) + 16 + 20 * scrape_count ), ((char*)outpacket) + 8 + 12 * scrape_count ); 171 outpacket[1] = inpacket[12 / 4];
174 172
175 socket_send6( serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0 ); 173 if (OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED) { /* Peer is gone. */
176 stats_issue_event( EVENT_SCRAPE, FLAG_UDP, scrape_count ); 174 ws->reply = ws->outbuf;
177 break; 175 ws->reply_size = remove_peer_from_torrent(FLAG_UDP, ws);
176 } else {
177 /* Limit amount of peers to OT_MAX_PEERS_UDP */
178 uint32_t numwant = ntohl(inpacket[92 / 4]);
179 size_t max_peers = ip6_isv4mapped(remoteip) ? OT_MAX_PEERS_UDP4 : OT_MAX_PEERS_UDP6;
180 if (numwant > max_peers)
181 numwant = max_peers;
182
183 ws->reply = ws->outbuf + 8;
184 ws->reply_size = 8 + add_peer_to_torrent_and_return_peers(FLAG_UDP, ws, numwant);
185 }
186
187 socket_send6(serversocket, ws->outbuf, ws->reply_size, remoteip, remoteport, 0);
188 stats_issue_event(EVENT_ANNOUNCE, FLAG_UDP, ws->reply_size);
189 break;
190
191 case 2: /* This is a scrape action */
192 outpacket[0] = htonl(2); /* scrape action */
193 outpacket[1] = inpacket[12 / 4];
194
195 for (scrape_count = 0; (scrape_count * 20 < byte_count - 16) && (scrape_count <= 74); scrape_count++)
196 return_udp_scrape_for_torrent(*(ot_hash *)(((char *)inpacket) + 16 + 20 * scrape_count), ((char *)outpacket) + 8 + 12 * scrape_count);
197
198 socket_send6(serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0);
199 stats_issue_event(EVENT_SCRAPE, FLAG_UDP, scrape_count);
200 break;
178 } 201 }
179 return 1; 202 return 1;
180} 203}
181 204
182static void* udp_worker( void * args ) { 205static void *udp_worker(void *args) {
183 int64 sock = (int64)args; 206 int64 sock = (int64)args;
184 struct ot_workstruct ws; 207 struct ot_workstruct ws;
185 memset( &ws, 0, sizeof(ws) ); 208 memset(&ws, 0, sizeof(ws));
186 209
187 ws.inbuf=malloc(G_INBUF_SIZE); 210 ws.inbuf = malloc(G_INBUF_SIZE);
188 ws.outbuf=malloc(G_OUTBUF_SIZE); 211 ws.outbuf = malloc(G_OUTBUF_SIZE);
189#ifdef _DEBUG_HTTPERROR 212#ifdef _DEBUG_HTTPERROR
190 ws.debugbuf=malloc(G_DEBUGBUF_SIZE); 213 ws.debugbuf = malloc(G_DEBUGBUF_SIZE);
191#endif 214#endif
192 215
193 while( g_opentracker_running ) 216 while (g_opentracker_running)
194 handle_udp6( sock, &ws ); 217 handle_udp6(sock, &ws);
195 218
196 free( ws.inbuf ); 219 free(ws.inbuf);
197 free( ws.outbuf ); 220 free(ws.outbuf);
198#ifdef _DEBUG_HTTPERROR 221#ifdef _DEBUG_HTTPERROR
199 free( ws.debugbuf ); 222 free(ws.debugbuf);
200#endif 223#endif
201 return NULL; 224 return NULL;
202} 225}
203 226
204void udp_init( int64 sock, unsigned int worker_count ) { 227void udp_init(int64 sock, unsigned int worker_count) {
205 pthread_t thread_id; 228 pthread_t thread_id;
206 if( !g_rijndael_round_key[0] ) 229 if (!g_rijndael_round_key[0])
207 udp_generate_rijndael_round_key(); 230 udp_generate_rijndael_round_key();
208#ifdef _DEBUG 231#ifdef _DEBUG
209 fprintf( stderr, " installing %d workers on udp socket %ld", worker_count, (unsigned long)sock ); 232 fprintf(stderr, " installing %d workers on udp socket %ld\n", worker_count, (unsigned long)sock);
210#endif 233#endif
211 while( worker_count-- ) 234 while (worker_count--)
212 pthread_create( &thread_id, NULL, udp_worker, (void *)sock ); 235 pthread_create(&thread_id, NULL, udp_worker, (void *)sock);
213} 236}
214
215const char *g_version_udp_c = "$Source$: $Revision$\n";
diff --git a/ot_udp.h b/ot_udp.h
index 974c727..a71a3d9 100644
--- a/ot_udp.h
+++ b/ot_udp.h
@@ -6,7 +6,7 @@
6#ifndef OT_UDP_H__ 6#ifndef OT_UDP_H__
7#define OT_UDP_H__ 7#define OT_UDP_H__
8 8
9void udp_init( int64 sock, unsigned int worker_count ); 9void udp_init(int64 sock, unsigned int worker_count);
10int handle_udp6( int64 serversocket, struct ot_workstruct *ws ); 10int handle_udp6(int64 serversocket, struct ot_workstruct *ws);
11 11
12#endif 12#endif
diff --git a/ot_vector.c b/ot_vector.c
index 2a632b2..2bc07b5 100644
--- a/ot_vector.c
+++ b/ot_vector.c
@@ -4,39 +4,37 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <stddef.h>
8#include <stdint.h>
7#include <stdlib.h> 9#include <stdlib.h>
8#include <string.h> 10#include <string.h>
9#include <strings.h> 11#include <strings.h>
10#include <stdint.h>
11 12
12/* Opentracker */ 13/* Opentracker */
13#include "trackerlogic.h" 14#include "trackerlogic.h"
14#include "ot_vector.h"
15 15
16/* Libowfat */ 16/* Libowfat */
17#include "uint32.h"
18#include "uint16.h" 17#include "uint16.h"
18#include "uint32.h"
19 19
20static int vector_compare_peer(const void *peer1, const void *peer2 ) { 20static int vector_compare_peer6(const void *peer1, const void *peer2) { return memcmp(peer1, peer2, OT_PEER_COMPARE_SIZE6); }
21 return memcmp( peer1, peer2, OT_PEER_COMPARE_SIZE ); 21static int vector_compare_peer4(const void *peer1, const void *peer2) { return memcmp(peer1, peer2, OT_PEER_COMPARE_SIZE4); }
22}
23 22
24/* This function gives us a binary search that returns a pointer, even if 23/* This function gives us a binary search that returns a pointer, even if
25 no exact match is found. In that case it sets exactmatch 0 and gives 24 no exact match is found. In that case it sets exactmatch 0 and gives
26 calling functions the chance to insert data 25 calling functions the chance to insert data
27*/ 26*/
28void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size, 27void *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch) {
29 size_t compare_size, int *exactmatch ) {
30 size_t interval = member_count; 28 size_t interval = member_count;
31 29
32 while( interval ) { 30 while (interval) {
33 uint8_t *lookat = ((uint8_t*)base) + member_size * ( interval / 2 ); 31 uint8_t *lookat = ((uint8_t *)base) + member_size * (interval / 2);
34 int cmp = memcmp( lookat, key, compare_size ); 32 int cmp = memcmp(lookat, key, compare_size);
35 if(cmp == 0 ) { 33 if (cmp == 0) {
36 base = lookat; 34 base = lookat;
37 break; 35 break;
38 } 36 }
39 if(cmp < 0) { 37 if (cmp < 0) {
40 base = lookat + member_size; 38 base = lookat + member_size;
41 interval--; 39 interval--;
42 } 40 }
@@ -44,13 +42,14 @@ void *binary_search( const void * const key, const void * base, const size_t mem
44 } 42 }
45 43
46 *exactmatch = interval; 44 *exactmatch = interval;
47 return (void*)base; 45 return (void *)base;
48} 46}
49 47
50static uint8_t vector_hash_peer( ot_peer *peer, int bucket_count ) { 48static uint8_t vector_hash_peer(ot_peer const *peer, size_t compare_size, int bucket_count) {
51 unsigned int hash = 5381, i = OT_PEER_COMPARE_SIZE; 49 unsigned int hash = 5381;
52 uint8_t *p = (uint8_t*)peer; 50 uint8_t *p = (uint8_t *)peer;
53 while( i-- ) hash += (hash<<5) + *(p++); 51 while (compare_size--)
52 hash += (hash << 5) + *(p++);
54 return hash % bucket_count; 53 return hash % bucket_count;
55} 54}
56 55
@@ -61,48 +60,62 @@ static uint8_t vector_hash_peer( ot_peer *peer, int bucket_count ) {
61 if it wasn't found in vector. Caller needs to check the passed "exactmatch" variable to see, whether an insert 60 if it wasn't found in vector. Caller needs to check the passed "exactmatch" variable to see, whether an insert
62 took place. If resizing the vector failed, NULL is returned, else the pointer to the object in vector. 61 took place. If resizing the vector failed, NULL is returned, else the pointer to the object in vector.
63*/ 62*/
64void *vector_find_or_insert( ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch ) { 63void *vector_find_or_insert(ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch) {
65 uint8_t *match = binary_search( key, vector->data, vector->size, member_size, compare_size, exactmatch ); 64 uint8_t *match = binary_search(key, vector->data, vector->size, member_size, compare_size, exactmatch);
66 65
67 if( *exactmatch ) return match; 66 if (*exactmatch)
67 return match;
68 68
69 if( vector->size + 1 > vector->space ) { 69 if (vector->size + 1 > vector->space) {
70 size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS; 70 size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS;
71 uint8_t *new_data = realloc( vector->data, new_space * member_size ); 71 uint8_t *new_data = realloc(vector->data, new_space * member_size);
72 if( !new_data ) return NULL; 72 if (!new_data)
73 return NULL;
73 /* Adjust pointer if it moved by realloc */ 74 /* Adjust pointer if it moved by realloc */
74 match = new_data + (match - (uint8_t*)vector->data); 75 match = new_data + (match - (uint8_t *)vector->data);
75 76
76 vector->data = new_data; 77 vector->data = new_data;
77 vector->space = new_space; 78 vector->space = new_space;
78 } 79 }
79 memmove( match + member_size, match, ((uint8_t*)vector->data) + member_size * vector->size - match ); 80 memmove(match + member_size, match, ((uint8_t *)vector->data) + member_size * vector->size - match);
80 81
81 vector->size++; 82 vector->size++;
82 return match; 83 return match;
83} 84}
84 85
85ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer *peer, int *exactmatch ) { 86ot_peer *vector_find_or_insert_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch) {
86 ot_peer *match; 87 ot_peer *match, *end;
88 const size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
89 size_t match_to_end;
87 90
88 /* If space is zero but size is set, we're dealing with a list of vector->size buckets */ 91 /* If space is zero but size is set, we're dealing with a list of vector->size buckets */
89 if( vector->space < vector->size ) 92 if (vector->space < vector->size)
90 vector = ((ot_vector*)vector->data) + vector_hash_peer(peer, vector->size ); 93 vector = ((ot_vector *)vector->data) + vector_hash_peer(peer, compare_size, vector->size);
91 match = (ot_peer*)binary_search( peer, vector->data, vector->size, sizeof(ot_peer), OT_PEER_COMPARE_SIZE, exactmatch ); 94 match = binary_search(peer, vector->data, vector->size, peer_size, compare_size, exactmatch);
92 95
93 if( *exactmatch ) return match; 96 if (*exactmatch)
97 return match;
94 98
95 if( vector->size + 1 > vector->space ) { 99 /* This is the amount of bytes that needs to be pushed backwards by peer_size bytes to make room for new peer */
96 size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS; 100 end = (ot_peer *)vector->data + vector->size * peer_size;
97 ot_peer *new_data = realloc( vector->data, new_space * sizeof(ot_peer) ); 101 match_to_end = end - match;
98 if( !new_data ) return NULL; 102
103 if (vector->size + 1 > vector->space) {
104 ptrdiff_t offset = match - (ot_peer *)vector->data;
105 size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS;
106 ot_peer *new_data = realloc(vector->data, new_space * peer_size);
107
108 if (!new_data)
109 return NULL;
99 /* Adjust pointer if it moved by realloc */ 110 /* Adjust pointer if it moved by realloc */
100 match = new_data + (match - (ot_peer*)vector->data); 111 match = new_data + offset;
101 112
102 vector->data = new_data; 113 vector->data = new_data;
103 vector->space = new_space; 114 vector->space = new_space;
104 } 115 }
105 memmove( match + 1, match, sizeof(ot_peer) * ( ((ot_peer*)vector->data) + vector->size - match ) ); 116
117 /* Here we're guaranteed to have enough space in vector to move the block of peers after insertion point */
118 memmove(match + peer_size, match, match_to_end);
106 119
107 vector->size++; 120 vector->size++;
108 return match; 121 return match;
@@ -113,126 +126,134 @@ ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer *peer, int *exac
113 1 if a non-seeding peer was removed 126 1 if a non-seeding peer was removed
114 2 if a seeding peer was removed 127 2 if a seeding peer was removed
115*/ 128*/
116int vector_remove_peer( ot_vector *vector, ot_peer *peer ) { 129int vector_remove_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size) {
117 int exactmatch; 130 int exactmatch, was_seeder;
118 ot_peer *match, *end; 131 ot_peer *match, *end;
132 size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
119 133
120 if( !vector->size ) return 0; 134 if (!vector->size)
135 return 0;
121 136
122 /* If space is zero but size is set, we're dealing with a list of vector->size buckets */ 137 /* If space is zero but size is set, we're dealing with a list of vector->size buckets */
123 if( vector->space < vector->size ) 138 if (vector->space < vector->size)
124 vector = ((ot_vector*)vector->data) + vector_hash_peer(peer, vector->size ); 139 vector = ((ot_vector *)vector->data) + vector_hash_peer(peer, compare_size, vector->size);
125 140
126 end = ((ot_peer*)vector->data) + vector->size; 141 end = ((ot_peer *)vector->data) + peer_size * vector->size;
127 match = (ot_peer*)binary_search( peer, vector->data, vector->size, sizeof(ot_peer), OT_PEER_COMPARE_SIZE, &exactmatch ); 142 match = (ot_peer *)binary_search(peer, vector->data, vector->size, peer_size, compare_size, &exactmatch);
128 if( !exactmatch ) return 0; 143 if (!exactmatch)
144 return 0;
129 145
130 exactmatch = ( OT_PEERFLAG( match ) & PEER_FLAG_SEEDING ) ? 2 : 1; 146 was_seeder = (OT_PEERFLAG_D(match, peer_size) & PEER_FLAG_SEEDING) ? 2 : 1;
131 memmove( match, match + 1, sizeof(ot_peer) * ( end - match - 1 ) ); 147 memmove(match, match + peer_size, end - match - peer_size);
132 148
133 vector->size--; 149 vector->size--;
134 vector_fixup_peers( vector ); 150 vector_fixup_peers(vector, peer_size);
135 return exactmatch; 151 return was_seeder;
136} 152}
137 153
138void vector_remove_torrent( ot_vector *vector, ot_torrent *match ) { 154void vector_remove_torrent(ot_vector *vector, ot_torrent *match) {
139 ot_torrent *end = ((ot_torrent*)vector->data) + vector->size; 155 ot_torrent *end = ((ot_torrent *)vector->data) + vector->size;
140 156
141 if( !vector->size ) return; 157 if (!vector->size)
158 return;
142 159
143 /* If this is being called after a unsuccessful malloc() for peer_list 160 /* If this is being called after a unsuccessful malloc() for peer_list
144 in add_peer_to_torrent, match->peer_list actually might be NULL */ 161 in add_peer_to_torrent, match->peer_list actually might be NULL */
145 if( match->peer_list) free_peerlist( match->peer_list ); 162 free_peerlist(match->peer_list6);
163 free_peerlist(match->peer_list4);
146 164
147 memmove( match, match + 1, sizeof(ot_torrent) * ( end - match - 1 ) ); 165 memmove(match, match + 1, sizeof(ot_torrent) * (end - match - 1));
148 if( ( --vector->size * OT_VECTOR_SHRINK_THRESH < vector->space ) && ( vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS ) ) { 166 if ((--vector->size * OT_VECTOR_SHRINK_THRESH < vector->space) && (vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS)) {
149 vector->space /= OT_VECTOR_SHRINK_RATIO; 167 vector->space /= OT_VECTOR_SHRINK_RATIO;
150 vector->data = realloc( vector->data, vector->space * sizeof( ot_torrent ) ); 168 vector->data = realloc(vector->data, vector->space * sizeof(ot_torrent));
151 } 169 }
152} 170}
153 171
154void vector_clean_list( ot_vector * vector, int num_buckets ) { 172void vector_clean_list(ot_vector *vector, int num_buckets) {
155 while( num_buckets-- ) 173 while (num_buckets--)
156 free( vector[num_buckets].data ); 174 free(vector[num_buckets].data);
157 free( vector ); 175 free(vector);
158 return; 176 return;
159} 177}
160 178
161void vector_redistribute_buckets( ot_peerlist * peer_list ) { 179void vector_redistribute_buckets(ot_peerlist *peer_list, size_t peer_size) {
162 int tmp, bucket, bucket_size_new, num_buckets_new, num_buckets_old = 1; 180 int tmp, bucket, bucket_size_new, num_buckets_new, num_buckets_old = 1;
163 ot_vector * bucket_list_new, * bucket_list_old = &peer_list->peers; 181 ot_vector *bucket_list_new, *bucket_list_old = &peer_list->peers;
182 int (*sort_func)(const void *, const void *) = peer_size == OT_PEER_SIZE6 ? &vector_compare_peer6 : &vector_compare_peer4;
164 183
165 if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { 184 if (OT_PEERLIST_HASBUCKETS(peer_list)) {
166 num_buckets_old = peer_list->peers.size; 185 num_buckets_old = peer_list->peers.size;
167 bucket_list_old = peer_list->peers.data; 186 bucket_list_old = peer_list->peers.data;
168 } 187 }
169 188
170 if( peer_list->peer_count < 255 ) 189 if (peer_list->peer_count < 255)
171 num_buckets_new = 1; 190 num_buckets_new = 1;
172 else if( peer_list->peer_count > 8192 ) 191 else if (peer_list->peer_count > 8192)
173 num_buckets_new = 64; 192 num_buckets_new = 64;
174 else if( peer_list->peer_count >= 512 && peer_list->peer_count < 4096 ) 193 else if (peer_list->peer_count >= 512 && peer_list->peer_count < 4096)
175 num_buckets_new = 16; 194 num_buckets_new = 16;
176 else if( peer_list->peer_count < 512 && num_buckets_old <= 16 ) 195 else if (peer_list->peer_count < 512 && num_buckets_old <= 16)
177 num_buckets_new = num_buckets_old; 196 num_buckets_new = num_buckets_old;
178 else if( peer_list->peer_count < 512 ) 197 else if (peer_list->peer_count < 512)
179 num_buckets_new = 1; 198 num_buckets_new = 1;
180 else if( peer_list->peer_count < 8192 && num_buckets_old > 1 ) 199 else if (peer_list->peer_count < 8192 && num_buckets_old > 1)
181 num_buckets_new = num_buckets_old; 200 num_buckets_new = num_buckets_old;
182 else 201 else
183 num_buckets_new = 16; 202 num_buckets_new = 16;
184 203
185 if( num_buckets_new == num_buckets_old ) 204 if (num_buckets_new == num_buckets_old)
186 return; 205 return;
187 206
188 /* Assume near perfect distribution */ 207 /* Assume near perfect distribution */
189 bucket_list_new = malloc( num_buckets_new * sizeof( ot_vector ) ); 208 bucket_list_new = malloc(num_buckets_new * sizeof(ot_vector));
190 if( !bucket_list_new) return; 209 if (!bucket_list_new)
191 bzero( bucket_list_new, num_buckets_new * sizeof( ot_vector ) ); 210 return;
211 bzero(bucket_list_new, num_buckets_new * sizeof(ot_vector));
192 212
193 tmp = peer_list->peer_count / num_buckets_new; 213 tmp = peer_list->peer_count / num_buckets_new;
194 bucket_size_new = OT_VECTOR_MIN_MEMBERS; 214 bucket_size_new = OT_VECTOR_MIN_MEMBERS;
195 while( bucket_size_new < tmp) 215 while (bucket_size_new < tmp)
196 bucket_size_new *= OT_VECTOR_GROW_RATIO; 216 bucket_size_new *= OT_VECTOR_GROW_RATIO;
197 217
198 /* preallocate vectors to hold all peers */ 218 /* preallocate vectors to hold all peers */
199 for( bucket=0; bucket<num_buckets_new; ++bucket ) { 219 for (bucket = 0; bucket < num_buckets_new; ++bucket) {
200 bucket_list_new[bucket].space = bucket_size_new; 220 bucket_list_new[bucket].space = bucket_size_new;
201 bucket_list_new[bucket].data = malloc( bucket_size_new * sizeof(ot_peer) ); 221 bucket_list_new[bucket].data = malloc(bucket_size_new * peer_size);
202 if( !bucket_list_new[bucket].data ) 222 if (!bucket_list_new[bucket].data)
203 return vector_clean_list( bucket_list_new, num_buckets_new ); 223 return vector_clean_list(bucket_list_new, num_buckets_new);
204 } 224 }
205 225
206 /* Now sort them into the correct bucket */ 226 /* Now sort them into the correct bucket */
207 for( bucket=0; bucket<num_buckets_old; ++bucket ) { 227 for (bucket = 0; bucket < num_buckets_old; ++bucket) {
208 ot_peer * peers_old = bucket_list_old[bucket].data, * peers_new; 228 ot_peer *peers_old = bucket_list_old[bucket].data;
209 int peer_count_old = bucket_list_old[bucket].size; 229 int peer_count_old = bucket_list_old[bucket].size;
210 while( peer_count_old-- ) { 230 while (peer_count_old--) {
211 ot_vector * bucket_dest = bucket_list_new; 231 ot_vector *bucket_dest = bucket_list_new;
212 if( num_buckets_new > 1 ) 232 if (num_buckets_new > 1)
213 bucket_dest += vector_hash_peer(peers_old, num_buckets_new); 233 bucket_dest += vector_hash_peer(peers_old, OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size), num_buckets_new);
214 if( bucket_dest->size + 1 > bucket_dest->space ) { 234 if (bucket_dest->size + 1 > bucket_dest->space) {
215 void * tmp = realloc( bucket_dest->data, sizeof(ot_peer) * OT_VECTOR_GROW_RATIO * bucket_dest->space ); 235 void *tmp = realloc(bucket_dest->data, peer_size * OT_VECTOR_GROW_RATIO * bucket_dest->space);
216 if( !tmp ) return vector_clean_list( bucket_list_new, num_buckets_new ); 236 if (!tmp)
237 return vector_clean_list(bucket_list_new, num_buckets_new);
217 bucket_dest->data = tmp; 238 bucket_dest->data = tmp;
218 bucket_dest->space *= OT_VECTOR_GROW_RATIO; 239 bucket_dest->space *= OT_VECTOR_GROW_RATIO;
219 } 240 }
220 peers_new = (ot_peer*)bucket_dest->data; 241 memcpy((ot_peer *)bucket_dest->data + peer_size * bucket_dest->size++, peers_old, peer_size);
221 memcpy(peers_new + bucket_dest->size++, peers_old++, sizeof(ot_peer)); 242 peers_old += peer_size;
222 } 243 }
223 } 244 }
224 245
225 /* Now sort each bucket to later allow bsearch */ 246 /* Now sort each bucket to later allow bsearch */
226 for( bucket=0; bucket<num_buckets_new; ++bucket ) 247 for (bucket = 0; bucket < num_buckets_new; ++bucket)
227 qsort( bucket_list_new[bucket].data, bucket_list_new[bucket].size, sizeof( ot_peer ), vector_compare_peer ); 248 qsort(bucket_list_new[bucket].data, bucket_list_new[bucket].size, peer_size, sort_func);
228 249
229 /* Everything worked fine. Now link new bucket_list to peer_list */ 250 /* Everything worked fine. Now link new bucket_list to peer_list */
230 if( OT_PEERLIST_HASBUCKETS( peer_list) ) 251 if (OT_PEERLIST_HASBUCKETS(peer_list))
231 vector_clean_list( (ot_vector*)peer_list->peers.data, peer_list->peers.size ); 252 vector_clean_list((ot_vector *)peer_list->peers.data, peer_list->peers.size);
232 else 253 else
233 free( peer_list->peers.data ); 254 free(peer_list->peers.data);
234 255
235 if( num_buckets_new > 1 ) { 256 if (num_buckets_new > 1) {
236 peer_list->peers.data = bucket_list_new; 257 peer_list->peers.data = bucket_list_new;
237 peer_list->peers.size = num_buckets_new; 258 peer_list->peers.size = num_buckets_new;
238 peer_list->peers.space = 0; /* Magic marker for "is list of buckets" */ 259 peer_list->peers.space = 0; /* Magic marker for "is list of buckets" */
@@ -240,27 +261,24 @@ void vector_redistribute_buckets( ot_peerlist * peer_list ) {
240 peer_list->peers.data = bucket_list_new->data; 261 peer_list->peers.data = bucket_list_new->data;
241 peer_list->peers.size = bucket_list_new->size; 262 peer_list->peers.size = bucket_list_new->size;
242 peer_list->peers.space = bucket_list_new->space; 263 peer_list->peers.space = bucket_list_new->space;
243 free( bucket_list_new ); 264 free(bucket_list_new);
244 } 265 }
245} 266}
246 267
247void vector_fixup_peers( ot_vector * vector ) { 268void vector_fixup_peers(ot_vector *vector, size_t peer_size) {
248 int need_fix = 0; 269 int need_fix = 0;
249 270
250 if( !vector->size ) { 271 if (!vector->size) {
251 free( vector->data ); 272 free(vector->data);
252 vector->data = NULL; 273 vector->data = NULL;
253 vector->space = 0; 274 vector->space = 0;
254 return; 275 return;
255 } 276 }
256 277
257 while( ( vector->size * OT_VECTOR_SHRINK_THRESH < vector->space ) && 278 while ((vector->size * OT_VECTOR_SHRINK_THRESH < vector->space) && (vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS)) {
258 ( vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS ) ) {
259 vector->space /= OT_VECTOR_SHRINK_RATIO; 279 vector->space /= OT_VECTOR_SHRINK_RATIO;
260 need_fix++; 280 need_fix++;
261 } 281 }
262 if( need_fix ) 282 if (need_fix)
263 vector->data = realloc( vector->data, vector->space * sizeof( ot_peer ) ); 283 vector->data = realloc(vector->data, vector->space * peer_size);
264} 284}
265
266const char *g_version_vector_c = "$Source$: $Revision$\n";
diff --git a/ot_vector.h b/ot_vector.h
index f7f87aa..8d41452 100644
--- a/ot_vector.h
+++ b/ot_vector.h
@@ -16,19 +16,21 @@
16#define OT_PEER_BUCKET_MAXCOUNT 256 16#define OT_PEER_BUCKET_MAXCOUNT 256
17 17
18typedef struct { 18typedef struct {
19 void *data; 19 void *data;
20 size_t size; 20 size_t size;
21 size_t space; 21 size_t space;
22} ot_vector; 22} ot_vector;
23 23
24void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size, 24void *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch);
25 size_t compare_size, int *exactmatch ); 25void *vector_find_or_insert(ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch);
26void *vector_find_or_insert( ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch ); 26ot_peer *vector_find_or_insert_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch);
27ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer *peer, int *exactmatch );
28 27
29int vector_remove_peer( ot_vector *vector, ot_peer *peer ); 28int vector_remove_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size);
30void vector_remove_torrent( ot_vector *vector, ot_torrent *match ); 29void vector_remove_torrent(ot_vector *vector, ot_torrent *match);
31void vector_redistribute_buckets( ot_peerlist * peer_list ); 30
32void vector_fixup_peers( ot_vector * vector ); 31/* For ot_clean.c */
32void vector_redistribute_buckets(ot_peerlist *peer_list, size_t peer_size);
33void vector_fixup_peers(ot_vector *vector, size_t peer_size);
34void vector_clean_list(ot_vector *vector, int num_buckets);
33 35
34#endif 36#endif
diff --git a/proxy.c b/proxy.c
index 1f09777..9946240 100644
--- a/proxy.c
+++ b/proxy.c
@@ -4,33 +4,33 @@
4 $Id$ */ 4 $Id$ */
5 5
6/* System */ 6/* System */
7#include <arpa/inet.h>
8#include <ctype.h>
9#include <errno.h>
10#include <pthread.h>
11#include <pwd.h>
12#include <signal.h>
7#include <stdint.h> 13#include <stdint.h>
14#include <stdio.h>
8#include <stdlib.h> 15#include <stdlib.h>
9#include <string.h> 16#include <string.h>
10#include <arpa/inet.h>
11#include <sys/socket.h> 17#include <sys/socket.h>
12#include <unistd.h> 18#include <unistd.h>
13#include <errno.h>
14#include <signal.h>
15#include <stdio.h>
16#include <pwd.h>
17#include <ctype.h>
18#include <pthread.h>
19 19
20/* Libowfat */ 20/* Libowfat */
21#include "socket.h" 21#include "byte.h"
22#include "io.h" 22#include "io.h"
23#include "iob.h" 23#include "iob.h"
24#include "byte.h"
25#include "scan.h"
26#include "ip6.h" 24#include "ip6.h"
27#include "ndelay.h" 25#include "ndelay.h"
26#include "scan.h"
27#include "socket.h"
28 28
29/* Opentracker */ 29/* Opentracker */
30#include "trackerlogic.h"
31#include "ot_vector.h"
32#include "ot_mutex.h" 30#include "ot_mutex.h"
33#include "ot_stats.h" 31#include "ot_stats.h"
32#include "ot_vector.h"
33#include "trackerlogic.h"
34 34
35#ifndef WANT_SYNC_LIVE 35#ifndef WANT_SYNC_LIVE
36#define WANT_SYNC_LIVE 36#define WANT_SYNC_LIVE
@@ -40,28 +40,28 @@
40ot_ip6 g_serverip; 40ot_ip6 g_serverip;
41uint16_t g_serverport = 9009; 41uint16_t g_serverport = 9009;
42uint32_t g_tracker_id; 42uint32_t g_tracker_id;
43char groupip_1[4] = { 224,0,23,5 }; 43char groupip_1[4] = {224, 0, 23, 5};
44int g_self_pipe[2]; 44int g_self_pipe[2];
45 45
46/* If you have more than 10 peers, don't use this proxy 46/* If you have more than 10 peers, don't use this proxy
47 Use 20 slots for 10 peers to have room for 10 incoming connection slots 47 Use 20 slots for 10 peers to have room for 10 incoming connection slots
48 */ 48 */
49#define MAX_PEERS 20 49#define MAX_PEERS 20
50 50
51#define LIVESYNC_INCOMING_BUFFSIZE (256*256) 51#define LIVESYNC_INCOMING_BUFFSIZE (256 * 256)
52#define STREAMSYNC_OUTGOING_BUFFSIZE (256*256) 52#define STREAMSYNC_OUTGOING_BUFFSIZE (256 * 256)
53 53
54#define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480 54#define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480
55#define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer)+sizeof(ot_hash)) 55#define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer) + sizeof(ot_hash))
56#define LIVESYNC_MAXDELAY 15 /* seconds */ 56#define LIVESYNC_MAXDELAY 15 /* seconds */
57 57
58/* The amount of time a complete sync cycle should take */ 58/* The amount of time a complete sync cycle should take */
59#define OT_SYNC_INTERVAL_MINUTES 2 59#define OT_SYNC_INTERVAL_MINUTES 2
60 60
61/* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */ 61/* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */
62#define OT_SYNC_SLEEP ( ( ( OT_SYNC_INTERVAL_MINUTES ) * 60 * 1000000 ) / ( OT_BUCKET_COUNT ) ) 62#define OT_SYNC_SLEEP (((OT_SYNC_INTERVAL_MINUTES) * 60 * 1000000) / (OT_BUCKET_COUNT))
63 63
64enum { OT_SYNC_PEER }; 64enum { OT_SYNC_PEER4, OT_SYNC_PEER6 };
65enum { FLAG_SERVERSOCKET = 1 }; 65enum { FLAG_SERVERSOCKET = 1 };
66 66
67/* For incoming packets */ 67/* For incoming packets */
@@ -75,145 +75,153 @@ static uint8_t *g_peerbuffer_pos;
75static uint8_t *g_peerbuffer_highwater = g_peerbuffer_start + LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS; 75static uint8_t *g_peerbuffer_highwater = g_peerbuffer_start + LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS;
76static ot_time g_next_packet_time; 76static ot_time g_next_packet_time;
77 77
78static void * livesync_worker( void * args ); 78static void *livesync_worker(void *args);
79static void * streamsync_worker( void * args ); 79static void *streamsync_worker(void *args);
80static void livesync_proxytell( uint8_t prefix, uint8_t *info_hash, uint8_t *peer ); 80static void livesync_proxytell(uint8_t prefix, uint8_t *info_hash, uint8_t *peer);
81 81
82void exerr( char * message ) { 82void exerr(char *message) {
83 fprintf( stderr, "%s\n", message ); 83 fprintf(stderr, "%s\n", message);
84 exit( 111 ); 84 exit(111);
85} 85}
86 86
87void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data ) { 87void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data) {
88 (void) event; 88 (void)event;
89 (void) proto; 89 (void)proto;
90 (void) event_data; 90 (void)event_data;
91} 91}
92 92
93void livesync_bind_mcast( ot_ip6 ip, uint16_t port) { 93void livesync_bind_mcast(ot_ip6 ip, uint16_t port) {
94 char tmpip[4] = {0,0,0,0}; 94 char tmpip[4] = {0, 0, 0, 0};
95 char *v4ip; 95 char *v4ip;
96 96
97 if( !ip6_isv4mapped(ip)) 97 if (!ip6_isv4mapped(ip))
98 exerr("v6 mcast support not yet available."); 98 exerr("v6 mcast support not yet available.");
99 v4ip = ip+12; 99 v4ip = ip + 12;
100 100
101 if( g_socket_in != -1 ) 101 if (g_socket_in != -1)
102 exerr("Error: Livesync listen ip specified twice."); 102 exerr("Error: Livesync listen ip specified twice.");
103 103
104 if( ( g_socket_in = socket_udp4( )) < 0) 104 if ((g_socket_in = socket_udp4()) < 0)
105 exerr("Error: Cant create live sync incoming socket." ); 105 exerr("Error: Cant create live sync incoming socket.");
106 ndelay_off(g_socket_in); 106 ndelay_off(g_socket_in);
107 107
108 if( socket_bind4_reuse( g_socket_in, tmpip, port ) == -1 ) 108 if (socket_bind4_reuse(g_socket_in, tmpip, port) == -1)
109 exerr("Error: Cant bind live sync incoming socket." ); 109 exerr("Error: Cant bind live sync incoming socket.");
110 110
111 if( socket_mcjoin4( g_socket_in, groupip_1, v4ip ) ) 111 if (socket_mcjoin4(g_socket_in, groupip_1, v4ip))
112 exerr("Error: Cant make live sync incoming socket join mcast group."); 112 exerr("Error: Cant make live sync incoming socket join mcast group.");
113 113
114 if( ( g_socket_out = socket_udp4()) < 0) 114 if ((g_socket_out = socket_udp4()) < 0)
115 exerr("Error: Cant create live sync outgoing socket." ); 115 exerr("Error: Cant create live sync outgoing socket.");
116 if( socket_bind4_reuse( g_socket_out, v4ip, port ) == -1 ) 116 if (socket_bind4_reuse(g_socket_out, v4ip, port) == -1)
117 exerr("Error: Cant bind live sync outgoing socket." ); 117 exerr("Error: Cant bind live sync outgoing socket.");
118 118
119 socket_mcttl4(g_socket_out, 1); 119 socket_mcttl4(g_socket_out, 1);
120 socket_mcloop4(g_socket_out, 1); 120 socket_mcloop4(g_socket_out, 1);
121} 121}
122 122
123size_t add_peer_to_torrent_proxy( ot_hash hash, ot_peer *peer ) { 123size_t add_peer_to_torrent_proxy(ot_hash hash, ot_peer *peer, size_t peer_size) {
124 int exactmatch; 124 int exactmatch;
125 ot_torrent *torrent; 125 ot_torrent *torrent;
126 ot_peer *peer_dest; 126 ot_peerlist *peer_list;
127 ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); 127 ot_peer *peer_dest;
128 128 ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash);
129 torrent = vector_find_or_insert( torrents_list, (void*)hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); 129 size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
130 if( !torrent ) 130
131 torrent = vector_find_or_insert(torrents_list, (void *)hash, sizeof(ot_torrent), compare_size, &exactmatch);
132 if (!torrent)
131 return -1; 133 return -1;
132 134
133 if( !exactmatch ) { 135 if (!exactmatch) {
134 /* Create a new torrent entry, then */ 136 /* Create a new torrent entry, then */
135 memcpy( torrent->hash, hash, sizeof(ot_hash) ); 137 memcpy(torrent->hash, hash, sizeof(ot_hash));
136 138
137 if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) { 139 if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) {
138 vector_remove_torrent( torrents_list, torrent ); 140 vector_remove_torrent(torrents_list, torrent);
139 mutex_bucket_unlock_by_hash( hash, 0 ); 141 mutex_bucket_unlock_by_hash(hash, 0);
140 return -1; 142 return -1;
141 } 143 }
142 144
143 byte_zero( torrent->peer_list, sizeof( ot_peerlist ) ); 145 byte_zero(torrent->peer_list6, sizeof(ot_peerlist));
146 byte_zero(torrent->peer_list4, sizeof(ot_peerlist));
144 } 147 }
145 148
149 peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
150
146 /* Check for peer in torrent */ 151 /* Check for peer in torrent */
147 peer_dest = vector_find_or_insert_peer( &(torrent->peer_list->peers), peer, &exactmatch ); 152 peer_dest = vector_find_or_insert_peer(&(peer_list->peers), peer, peer_size, &exactmatch);
148 if( !peer_dest ) { 153 if (!peer_dest) {
149 mutex_bucket_unlock_by_hash( hash, 0 ); 154 mutex_bucket_unlock_by_hash(hash, 0);
150 return -1; 155 return -1;
151 } 156 }
152 /* Tell peer that it's fresh */ 157 /* Tell peer that it's fresh */
153 OT_PEERTIME( peer ) = 0; 158 OT_PEERTIME(peer, peer_size) = 0;
154 159
155 /* If we hadn't had a match create peer there */ 160 /* If we hadn't had a match create peer there */
156 if( !exactmatch ) { 161 if (!exactmatch) {
157 torrent->peer_list->peer_count++; 162 peer_list->peer_count++;
158 if( OT_PEERFLAG(peer) & PEER_FLAG_SEEDING ) 163 if (OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_SEEDING)
159 torrent->peer_list->seed_count++; 164 peer_list->seed_count++;
160 } 165 }
161 memcpy( peer_dest, peer, sizeof(ot_peer) ); 166 memcpy(peer_dest, peer, peer_size);
162 mutex_bucket_unlock_by_hash( hash, 0 ); 167 mutex_bucket_unlock_by_hash(hash, 0);
163 return 0; 168 return 0;
164} 169}
165 170
166size_t remove_peer_from_torrent_proxy( ot_hash hash, ot_peer *peer ) { 171size_t remove_peer_from_torrent_proxy(ot_hash hash, ot_peer *peer, size_t peer_size) {
167 int exactmatch; 172 int exactmatch;
168 ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); 173 ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash);
169 ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); 174 ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
170 175
171 if( exactmatch ) { 176 if (exactmatch) {
172 ot_peerlist *peer_list = torrent->peer_list; 177 ot_peerlist *peer_list = peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
173 switch( vector_remove_peer( &peer_list->peers, peer ) ) { 178 switch (vector_remove_peer(&peer_list->peers, peer, peer_size)) {
174 case 2: peer_list->seed_count--; /* Fall throughs intended */ 179 case 2:
175 case 1: peer_list->peer_count--; /* Fall throughs intended */ 180 peer_list->seed_count--; /* Intentional fallthrough */
176 default: break; 181 case 1:
182 peer_list->peer_count--; /* Intentional fallthrough */
183 default:
184 break;
177 } 185 }
178 } 186 }
179 187
180 mutex_bucket_unlock_by_hash( hash, 0 ); 188 mutex_bucket_unlock_by_hash(hash, 0);
181 return 0; 189 return 0;
182} 190}
183 191
184void free_peerlist( ot_peerlist *peer_list ) { 192void free_peerlist(ot_peerlist *peer_list) {
185 if( peer_list->peers.data ) { 193 if (peer_list->peers.data) {
186 if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { 194 if (OT_PEERLIST_HASBUCKETS(peer_list)) {
187 ot_vector *bucket_list = (ot_vector*)(peer_list->peers.data); 195 ot_vector *bucket_list = (ot_vector *)(peer_list->peers.data);
188 196
189 while( peer_list->peers.size-- ) 197 while (peer_list->peers.size--)
190 free( bucket_list++->data ); 198 free(bucket_list++->data);
191 } 199 }
192 free( peer_list->peers.data ); 200 free(peer_list->peers.data);
193 } 201 }
194 free( peer_list ); 202 free(peer_list);
195} 203}
196 204
197static void livesync_handle_peersync( ssize_t datalen ) { 205static void livesync_handle_peersync(ssize_t datalen, size_t peer_size) {
198 int off = sizeof( g_tracker_id ) + sizeof( uint32_t ); 206 int off = sizeof(g_tracker_id) + sizeof(uint32_t);
199 207
200 fprintf( stderr, "." ); 208 fprintf(stderr, ".");
201 209
202 while( off + (ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ) <= datalen ) { 210 while ((ssize_t)(off + sizeof(ot_hash) + peer_size) <= datalen) {
203 ot_peer *peer = (ot_peer*)(g_inbuffer + off + sizeof(ot_hash)); 211 ot_peer *peer = (ot_peer *)(g_inbuffer + off + sizeof(ot_hash));
204 ot_hash *hash = (ot_hash*)(g_inbuffer + off); 212 ot_hash *hash = (ot_hash *)(g_inbuffer + off);
205 213
206 if( OT_PEERFLAG(peer) & PEER_FLAG_STOPPED ) 214 if (OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_STOPPED)
207 remove_peer_from_torrent_proxy( *hash, peer ); 215 remove_peer_from_torrent_proxy(*hash, peer, peer_size);
208 else 216 else
209 add_peer_to_torrent_proxy( *hash, peer ); 217 add_peer_to_torrent_proxy(*hash, peer, peer_size);
210 218
211 off += sizeof( ot_hash ) + sizeof( ot_peer ); 219 off += sizeof(ot_hash) + peer_size;
212 } 220 }
213} 221}
214 222
215int usage( char *self ) { 223int usage(char *self) {
216 fprintf( stderr, "Usage: %s -L <livesync_iface_ip> -l <listenip>:<listenport> -c <connectip>:<connectport>\n", self ); 224 fprintf(stderr, "Usage: %s -L <livesync_iface_ip> -l <listenip>:<listenport> -c <connectip>:<connectport>\n", self);
217 return 0; 225 return 0;
218} 226}
219 227
@@ -228,115 +236,115 @@ enum {
228 FLAG_MASK = 0x07 236 FLAG_MASK = 0x07
229}; 237};
230 238
231#define PROXYPEER_NEEDSCONNECT(flag) ((flag)==FLAG_OUTGOING) 239#define PROXYPEER_NEEDSCONNECT(flag) ((flag) == FLAG_OUTGOING)
232#define PROXYPEER_ISCONNECTED(flag) (((flag)&FLAG_MASK)==FLAG_CONNECTED) 240#define PROXYPEER_ISCONNECTED(flag) (((flag) & FLAG_MASK) == FLAG_CONNECTED)
233#define PROXYPEER_SETDISCONNECTED(flag) (flag)=(((flag)&FLAG_OUTGOING)|FLAG_DISCONNECTED) 241#define PROXYPEER_SETDISCONNECTED(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_DISCONNECTED)
234#define PROXYPEER_SETCONNECTING(flag) (flag)=(((flag)&FLAG_OUTGOING)|FLAG_CONNECTING) 242#define PROXYPEER_SETCONNECTING(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_CONNECTING)
235#define PROXYPEER_SETWAITTRACKERID(flag) (flag)=(((flag)&FLAG_OUTGOING)|FLAG_WAITTRACKERID) 243#define PROXYPEER_SETWAITTRACKERID(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_WAITTRACKERID)
236#define PROXYPEER_SETCONNECTED(flag) (flag)=(((flag)&FLAG_OUTGOING)|FLAG_CONNECTED) 244#define PROXYPEER_SETCONNECTED(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_CONNECTED)
237 245
238typedef struct { 246typedef struct {
239 int state; /* Whether we want to connect, how far our handshake is, etc. */ 247 int state; /* Whether we want to connect, how far our handshake is, etc. */
240 ot_ip6 ip; /* The peer to connect to */ 248 ot_ip6 ip; /* The peer to connect to */
241 uint16_t port; /* The peers port */ 249 uint16_t port; /* The peers port */
242 uint8_t indata[8192*16]; /* Any data not processed yet */ 250 uint8_t indata[8192 * 16]; /* Any data not processed yet */
243 size_t indata_length; /* Length of unprocessed data */ 251 size_t indata_length; /* Length of unprocessed data */
244 uint32_t tracker_id; /* How the other end greeted */ 252 uint32_t tracker_id; /* How the other end greeted */
245 int64 fd; /* A file handle, if connected, <= 0 is disconnected (0 initially, -1 else) */ 253 int64 fd; /* A file handle, if connected, <= 0 is disconnected (0 initially, -1 else) */
246 io_batch outdata; /* The iobatch containing our sync data */ 254 io_batch outdata; /* The iobatch containing our sync data */
247 255
248 size_t packet_tcount; /* Number of unprocessed torrents in packet we currently receive */ 256 size_t packet_tcount; /* Number of unprocessed torrents in packet we currently receive */
249 uint8_t packet_tprefix; /* Prefix byte for all torrents in current packet */ 257 uint8_t packet_tprefix; /* Prefix byte for all torrents in current packet */
250 uint8_t packet_type; /* Type of current packet */ 258 uint8_t packet_type; /* Type of current packet */
251 uint32_t packet_tid; /* Tracker id for current packet */ 259 uint32_t packet_tid; /* Tracker id for current packet */
252 260
253} proxy_peer; 261} proxy_peer;
254static void process_indata( proxy_peer * peer ); 262static void process_indata(proxy_peer *peer);
255 263
256void reset_info_block( proxy_peer * peer ) { 264void reset_info_block(proxy_peer *peer) {
257 peer->indata_length = 0; 265 peer->indata_length = 0;
258 peer->tracker_id = 0; 266 peer->tracker_id = 0;
259 peer->fd = -1; 267 peer->fd = -1;
260 peer->packet_tcount = 0; 268 peer->packet_tcount = 0;
261 iob_reset( &peer->outdata ); 269 iob_reset(&peer->outdata);
262 PROXYPEER_SETDISCONNECTED( peer->state ); 270 PROXYPEER_SETDISCONNECTED(peer->state);
263} 271}
264 272
265/* Number of connections to peers 273/* Number of connections to peers
266 * If a peer's IP is set, we try to reconnect, when the connection drops 274 * If a peer's IP is set, we try to reconnect, when the connection drops
267 * If we already have a connected tracker_id in our records for an _incoming_ connection, drop it 275 * If we already have a connected tracker_id in our records for an _incoming_ connection, drop it
268 * Multiple connections to/from the same ip are okay, if tracker_id doesn't match 276 * Multiple connections to/from the same ip are okay, if tracker_id doesn't match
269 * Reconnect attempts occur only twice a minute 277 * Reconnect attempts occur only twice a minute
270*/ 278 */
271static int g_connection_count; 279static int g_connection_count;
272static ot_time g_connection_reconn; 280static ot_time g_connection_reconn;
273static proxy_peer g_connections[MAX_PEERS]; 281static proxy_peer g_connections[MAX_PEERS];
274 282
275static void handle_reconnects( void ) { 283static void handle_reconnects(void) {
276 int i; 284 int i;
277 for( i=0; i<g_connection_count; ++i ) 285 for (i = 0; i < g_connection_count; ++i)
278 if( PROXYPEER_NEEDSCONNECT( g_connections[i].state ) ) { 286 if (PROXYPEER_NEEDSCONNECT(g_connections[i].state)) {
279 int64 newfd = socket_tcp6( ); 287 int64 newfd = socket_tcp6();
280 fprintf( stderr, "(Re)connecting to peer..." ); 288 fprintf(stderr, "(Re)connecting to peer...");
281 if( newfd < 0 ) continue; /* No socket for you */ 289 if (newfd < 0)
290 continue; /* No socket for you */
282 io_fd(newfd); 291 io_fd(newfd);
283 if( socket_bind6_reuse(newfd,g_serverip,g_serverport,0) ) { 292 if (socket_bind6_reuse(newfd, g_serverip, g_serverport, 0)) {
284 io_close( newfd ); 293 io_close(newfd);
285 continue; 294 continue;
286 } 295 }
287 if( socket_connect6(newfd,g_connections[i].ip,g_connections[i].port,0) == -1 && 296 if (socket_connect6(newfd, g_connections[i].ip, g_connections[i].port, 0) == -1 && errno != EINPROGRESS && errno != EWOULDBLOCK) {
288 errno != EINPROGRESS && errno != EWOULDBLOCK ) {
289 close(newfd); 297 close(newfd);
290 continue; 298 continue;
291 } 299 }
292 io_wantwrite(newfd); /* So we will be informed when it is connected */ 300 io_wantwrite(newfd); /* So we will be informed when it is connected */
293 io_setcookie(newfd,g_connections+i); 301 io_setcookie(newfd, g_connections + i);
294 302
295 /* Prepare connection info block */ 303 /* Prepare connection info block */
296 reset_info_block( g_connections+i ); 304 reset_info_block(g_connections + i);
297 g_connections[i].fd = newfd; 305 g_connections[i].fd = newfd;
298 PROXYPEER_SETCONNECTING( g_connections[i].state ); 306 PROXYPEER_SETCONNECTING(g_connections[i].state);
299 } 307 }
300 g_connection_reconn = time(NULL) + 30; 308 g_connection_reconn = time(NULL) + 30;
301} 309}
302 310
303/* Handle incoming connection requests, check against whitelist */ 311/* Handle incoming connection requests, check against whitelist */
304static void handle_accept( int64 serversocket ) { 312static void handle_accept(int64 serversocket) {
305 int64 newfd; 313 int64 newfd;
306 ot_ip6 ip; 314 ot_ip6 ip;
307 uint16 port; 315 uint16 port;
308 316
309 while( ( newfd = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) { 317 while ((newfd = socket_accept6(serversocket, ip, &port, NULL)) != -1) {
310 318
311 /* XXX some access control */ 319 /* XXX some access control */
312 320
313 /* Put fd into a non-blocking mode */ 321 /* Put fd into a non-blocking mode */
314 io_nonblock( newfd ); 322 io_nonblock(newfd);
315 323
316 if( !io_fd( newfd ) ) 324 if (!io_fd(newfd))
317 io_close( newfd ); 325 io_close(newfd);
318 else { 326 else {
319 /* Find a new home for our incoming connection */ 327 /* Find a new home for our incoming connection */
320 int i; 328 int i;
321 for( i=0; i<MAX_PEERS; ++i ) 329 for (i = 0; i < MAX_PEERS; ++i)
322 if( g_connections[i].state == FLAG_DISCONNECTED ) 330 if (g_connections[i].state == FLAG_DISCONNECTED)
323 break; 331 break;
324 if( i == MAX_PEERS ) { 332 if (i == MAX_PEERS) {
325 fprintf( stderr, "No room for incoming connection." ); 333 fprintf(stderr, "No room for incoming connection.");
326 close( newfd ); 334 close(newfd);
327 continue; 335 continue;
328 } 336 }
329 337
330 /* Prepare connection info block */ 338 /* Prepare connection info block */
331 reset_info_block( g_connections+i ); 339 reset_info_block(g_connections + i);
332 PROXYPEER_SETCONNECTING( g_connections[i].state ); 340 PROXYPEER_SETCONNECTING(g_connections[i].state);
333 g_connections[i].port = port; 341 g_connections[i].port = port;
334 g_connections[i].fd = newfd; 342 g_connections[i].fd = newfd;
335 343
336 io_setcookie( newfd, g_connections + i ); 344 io_setcookie(newfd, g_connections + i);
337 345
338 /* We expect the connecting side to begin with its tracker_id */ 346 /* We expect the connecting side to begin with its tracker_id */
339 io_wantread( newfd ); 347 io_wantread(newfd);
340 } 348 }
341 } 349 }
342 350
@@ -344,117 +352,116 @@ static void handle_accept( int64 serversocket ) {
344} 352}
345 353
346/* New sync data on the stream */ 354/* New sync data on the stream */
347static void handle_read( int64 peersocket ) { 355static void handle_read(int64 peersocket) {
348 int i; 356 int i;
349 int64 datalen; 357 int64 datalen;
350 uint32_t tracker_id; 358 uint32_t tracker_id;
351 proxy_peer *peer = io_getcookie( peersocket ); 359 proxy_peer *peer = io_getcookie(peersocket);
352 360
353 if( !peer ) { 361 if (!peer) {
354 /* Can't happen ;) */ 362 /* Can't happen ;) */
355 io_close( peersocket ); 363 io_close(peersocket);
356 return; 364 return;
357 } 365 }
358 switch( peer->state & FLAG_MASK ) { 366 switch (peer->state & FLAG_MASK) {
359 case FLAG_DISCONNECTED: 367 case FLAG_DISCONNECTED:
360 io_close( peersocket ); 368 io_close(peersocket);
361 break; /* Shouldnt happen */ 369 break; /* Shouldnt happen */
362 case FLAG_CONNECTING: 370 case FLAG_CONNECTING:
363 case FLAG_WAITTRACKERID: 371 case FLAG_WAITTRACKERID:
364 /* We want at least the first four bytes to come at once, to avoid keeping extra states (for now) 372 /* We want at least the first four bytes to come at once, to avoid keeping extra states (for now)
365 This also catches 0 bytes reads == EOF and negative values, denoting connection errors */ 373 This also catches 0 bytes reads == EOF and negative values, denoting connection errors */
366 if( io_tryread( peersocket, (void*)&tracker_id, sizeof( tracker_id ) ) != sizeof( tracker_id ) ) 374 if (io_tryread(peersocket, (void *)&tracker_id, sizeof(tracker_id)) != sizeof(tracker_id))
367 goto close_socket; 375 goto close_socket;
368 376
369 /* See, if we already have a connection to that peer */ 377 /* See, if we already have a connection to that peer */
370 for( i=0; i<MAX_PEERS; ++i ) 378 for (i = 0; i < MAX_PEERS; ++i)
371 if( ( g_connections[i].state & FLAG_MASK ) == FLAG_CONNECTED && 379 if ((g_connections[i].state & FLAG_MASK) == FLAG_CONNECTED && g_connections[i].tracker_id == tracker_id) {
372 g_connections[i].tracker_id == tracker_id ) { 380 fprintf(stderr, "Peer already connected. Closing connection.\n");
373 fprintf( stderr, "Peer already connected. Closing connection.\n" );
374 goto close_socket; 381 goto close_socket;
375 } 382 }
376 383
377 /* Also no need for soliloquy */ 384 /* Also no need for soliloquy */
378 if( tracker_id == g_tracker_id ) 385 if (tracker_id == g_tracker_id)
379 goto close_socket; 386 goto close_socket;
380 387
381 /* The new connection is good, send our tracker_id on incoming connections */ 388 /* The new connection is good, send our tracker_id on incoming connections */
382 if( peer->state == FLAG_CONNECTING ) 389 if (peer->state == FLAG_CONNECTING)
383 if( io_trywrite( peersocket, (void*)&g_tracker_id, sizeof( g_tracker_id ) ) != sizeof( g_tracker_id ) ) 390 if (io_trywrite(peersocket, (void *)&g_tracker_id, sizeof(g_tracker_id)) != sizeof(g_tracker_id))
384 goto close_socket; 391 goto close_socket;
385 392
386 peer->tracker_id = tracker_id; 393 peer->tracker_id = tracker_id;
387 PROXYPEER_SETCONNECTED( peer->state ); 394 PROXYPEER_SETCONNECTED(peer->state);
388 395
389 if( peer->state & FLAG_OUTGOING ) 396 if (peer->state & FLAG_OUTGOING)
390 fprintf( stderr, "succeeded.\n" ); 397 fprintf(stderr, "succeeded.\n");
391 else 398 else
392 fprintf( stderr, "Incoming connection successful.\n" ); 399 fprintf(stderr, "Incoming connection successful.\n");
393 400
394 break; 401 break;
395close_socket: 402 close_socket:
396 fprintf( stderr, "Handshake incomplete, closing socket\n" ); 403 fprintf(stderr, "Handshake incomplete, closing socket\n");
397 io_close( peersocket ); 404 io_close(peersocket);
398 reset_info_block( peer ); 405 reset_info_block(peer);
399 break; 406 break;
400 case FLAG_CONNECTED: 407 case FLAG_CONNECTED:
401 /* Here we acutally expect data from peer 408 /* Here we acutally expect data from peer
402 indata_length should be less than 20+256*7 bytes, for incomplete torrent entries */ 409 indata_length should be less than 20+256*7 bytes, for incomplete torrent entries */
403 datalen = io_tryread( peersocket, (void*)(peer->indata + peer->indata_length), sizeof( peer->indata ) - peer->indata_length ); 410 datalen = io_tryread(peersocket, (void *)(peer->indata + peer->indata_length), sizeof(peer->indata) - peer->indata_length);
404 if( !datalen || datalen < -1 ) { 411 if (!datalen || datalen < -1) {
405 fprintf( stderr, "Connection closed by remote peer.\n" ); 412 fprintf(stderr, "Connection closed by remote peer.\n");
406 io_close( peersocket ); 413 io_close(peersocket);
407 reset_info_block( peer ); 414 reset_info_block(peer);
408 } else if( datalen > 0 ) { 415 } else if (datalen > 0) {
409 peer->indata_length += datalen; 416 peer->indata_length += datalen;
410 process_indata( peer ); 417 process_indata(peer);
411 } 418 }
412 break; 419 break;
413 } 420 }
414} 421}
415 422
416/* Can write new sync data to the stream */ 423/* Can write new sync data to the stream */
417static void handle_write( int64 peersocket ) { 424static void handle_write(int64 peersocket) {
418 proxy_peer *peer = io_getcookie( peersocket ); 425 proxy_peer *peer = io_getcookie(peersocket);
419 426
420 if( !peer ) { 427 if (!peer) {
421 /* Can't happen ;) */ 428 /* Can't happen ;) */
422 io_close( peersocket ); 429 io_close(peersocket);
423 return; 430 return;
424 } 431 }
425 432
426 switch( peer->state & FLAG_MASK ) { 433 switch (peer->state & FLAG_MASK) {
427 case FLAG_DISCONNECTED: 434 case FLAG_DISCONNECTED:
428 default: /* Should not happen */ 435 default: /* Should not happen */
429 io_close( peersocket ); 436 io_close(peersocket);
430 break; 437 break;
431 case FLAG_CONNECTING: 438 case FLAG_CONNECTING:
432 /* Ensure that the connection is established and handle connection error */ 439 /* Ensure that the connection is established and handle connection error */
433 if( peer->state & FLAG_OUTGOING && !socket_connected( peersocket ) ) { 440 if (peer->state & FLAG_OUTGOING && !socket_connected(peersocket)) {
434 fprintf( stderr, "failed\n" ); 441 fprintf(stderr, "failed\n");
435 reset_info_block( peer ); 442 reset_info_block(peer);
436 io_close( peersocket ); 443 io_close(peersocket);
437 break; 444 break;
438 } 445 }
439 446
440 if( io_trywrite( peersocket, (void*)&g_tracker_id, sizeof( g_tracker_id ) ) == sizeof( g_tracker_id ) ) { 447 if (io_trywrite(peersocket, (void *)&g_tracker_id, sizeof(g_tracker_id)) == sizeof(g_tracker_id)) {
441 PROXYPEER_SETWAITTRACKERID( peer->state ); 448 PROXYPEER_SETWAITTRACKERID(peer->state);
442 io_dontwantwrite( peersocket ); 449 io_dontwantwrite(peersocket);
443 io_wantread( peersocket ); 450 io_wantread(peersocket);
444 } else { 451 } else {
445 fprintf( stderr, "Handshake incomplete, closing socket\n" ); 452 fprintf(stderr, "Handshake incomplete, closing socket\n");
446 io_close( peersocket ); 453 io_close(peersocket);
447 reset_info_block( peer ); 454 reset_info_block(peer);
448 } 455 }
449 break; 456 break;
450 case FLAG_CONNECTED: 457 case FLAG_CONNECTED:
451 switch( iob_send( peersocket, &peer->outdata ) ) { 458 switch (iob_send(peersocket, &peer->outdata)) {
452 case 0: /* all data sent */ 459 case 0: /* all data sent */
453 io_dontwantwrite( peersocket ); 460 io_dontwantwrite(peersocket);
454 break; 461 break;
455 case -3: /* an error occured */ 462 case -3: /* an error occured */
456 io_close( peersocket ); 463 io_close(peersocket);
457 reset_info_block( peer ); 464 reset_info_block(peer);
458 break; 465 break;
459 default: /* Normal operation or eagain */ 466 default: /* Normal operation or eagain */
460 break; 467 break;
@@ -469,286 +476,324 @@ static void server_mainloop() {
469 int64 sock; 476 int64 sock;
470 477
471 /* inlined livesync_init() */ 478 /* inlined livesync_init() */
472 memset( g_peerbuffer_start, 0, sizeof( g_peerbuffer_start ) ); 479 memset(g_peerbuffer_start, 0, sizeof(g_peerbuffer_start));
473 g_peerbuffer_pos = g_peerbuffer_start; 480 g_peerbuffer_pos = g_peerbuffer_start;
474 memcpy( g_peerbuffer_pos, &g_tracker_id, sizeof( g_tracker_id ) ); 481 memcpy(g_peerbuffer_pos, &g_tracker_id, sizeof(g_tracker_id));
475 uint32_pack_big( (char*)g_peerbuffer_pos + sizeof( g_tracker_id ), OT_SYNC_PEER); 482 uint32_pack_big((char *)g_peerbuffer_pos + sizeof(g_tracker_id), OT_SYNC_PEER);
476 g_peerbuffer_pos += sizeof( g_tracker_id ) + sizeof( uint32_t); 483 g_peerbuffer_pos += sizeof(g_tracker_id) + sizeof(uint32_t);
477 g_next_packet_time = time(NULL) + LIVESYNC_MAXDELAY; 484 g_next_packet_time = time(NULL) + LIVESYNC_MAXDELAY;
478 485
479 while(1) { 486 while (1) {
480 /* See, if we need to connect to anyone */ 487 /* See if we need to connect to anyone */
481 if( time(NULL) > g_connection_reconn ) 488 if (time(NULL) > g_connection_reconn)
482 handle_reconnects( ); 489 handle_reconnects();
483 490
484 /* Wait for io events until next approx reconn check time */ 491 /* Wait for io events until next approx reconn check time */
485 io_waituntil2( 30*1000 ); 492 io_waituntil2(30 * 1000);
486 493
487 /* Loop over readable sockets */ 494 /* Loop over readable sockets */
488 while( ( sock = io_canread( ) ) != -1 ) { 495 while ((sock = io_canread()) != -1) {
489 const void *cookie = io_getcookie( sock ); 496 const void *cookie = io_getcookie(sock);
490 if( (uintptr_t)cookie == FLAG_SERVERSOCKET ) 497 if ((uintptr_t)cookie == FLAG_SERVERSOCKET)
491 handle_accept( sock ); 498 handle_accept(sock);
492 else 499 else
493 handle_read( sock ); 500 handle_read(sock);
494 } 501 }
495 502
496 /* Loop over writable sockets */ 503 /* Loop over writable sockets */
497 while( ( sock = io_canwrite( ) ) != -1 ) 504 while ((sock = io_canwrite()) != -1)
498 handle_write( sock ); 505 handle_write(sock);
499 506
500 livesync_ticker( ); 507 livesync_ticker();
501 } 508 }
502} 509}
503 510
504static void panic( const char *routine ) { 511static void panic(const char *routine) {
505 fprintf( stderr, "%s: %s\n", routine, strerror(errno) ); 512 fprintf(stderr, "%s: %s\n", routine, strerror(errno));
506 exit( 111 ); 513 exit(111);
507} 514}
508 515
509static int64_t ot_try_bind( ot_ip6 ip, uint16_t port ) { 516static int64_t ot_try_bind(ot_ip6 ip, uint16_t port) {
510 int64 sock = socket_tcp6( ); 517 int64 sock = socket_tcp6();
511 518
512 if( socket_bind6_reuse( sock, ip, port, 0 ) == -1 ) 519 if (socket_bind6_reuse(sock, ip, port, 0) == -1)
513 panic( "socket_bind6_reuse" ); 520 panic("socket_bind6_reuse");
514 521
515 if( socket_listen( sock, SOMAXCONN) == -1 ) 522 if (socket_listen(sock, SOMAXCONN) == -1)
516 panic( "socket_listen" ); 523 panic("socket_listen");
517 524
518 if( !io_fd( sock ) ) 525 if (!io_fd(sock))
519 panic( "io_fd" ); 526 panic("io_fd");
520 527
521 io_setcookie( sock, (void*)FLAG_SERVERSOCKET ); 528 io_setcookie(sock, (void *)FLAG_SERVERSOCKET);
522 io_wantread( sock ); 529 io_wantread(sock);
523 return sock; 530 return sock;
524} 531}
525 532
526 533static int scan_ip6_port(const char *src, ot_ip6 ip, uint16 *port) {
527static int scan_ip6_port( const char *src, ot_ip6 ip, uint16 *port ) {
528 const char *s = src; 534 const char *s = src;
529 int off, bracket = 0; 535 int off, bracket = 0;
530 while( isspace(*s) ) ++s; 536 while (isspace(*s))
531 if( *s == '[' ) ++s, ++bracket; /* for v6 style notation */ 537 ++s;
532 if( !(off = scan_ip6( s, ip ) ) ) 538 if (*s == '[')
539 ++s, ++bracket; /* for v6 style notation */
540 if (!(off = scan_ip6(s, ip)))
533 return 0; 541 return 0;
534 s += off; 542 s += off;
535 if( *s == 0 || isspace(*s)) return s-src; 543 if (*s == 0 || isspace(*s))
536 if( *s == ']' && bracket ) ++s; 544 return s - src;
537 if( !ip6_isv4mapped(ip)){ 545 if (*s == ']' && bracket)
538 if( ( bracket && *(s) != ':' ) || ( *(s) != '.' ) ) return 0; 546 ++s;
547 if (!ip6_isv4mapped(ip)) {
548 if ((bracket && *(s) != ':') || (*(s) != '.'))
549 return 0;
539 s++; 550 s++;
540 } else { 551 } else {
541 if( *(s++) != ':' ) return 0; 552 if (*(s++) != ':')
553 return 0;
542 } 554 }
543 if( !(off = scan_ushort (s, port ) ) ) 555 if (!(off = scan_ushort(s, port)))
544 return 0; 556 return 0;
545 return off+s-src; 557 return off + s - src;
546} 558}
547 559
548int main( int argc, char **argv ) { 560int main(int argc, char **argv) {
549 static pthread_t sync_in_thread_id; 561 static pthread_t sync_in_thread_id;
550 static pthread_t sync_out_thread_id; 562 static pthread_t sync_out_thread_id;
551 ot_ip6 serverip; 563 ot_ip6 serverip;
552 uint16_t tmpport; 564 uint16_t tmpport;
553 int scanon = 1, lbound = 0, sbound = 0; 565 int scanon = 1, lbound = 0, sbound = 0;
554 566
555 srandom( time(NULL) ); 567 srandom(time(NULL));
568#ifdef WANT_ARC4RANDOM
569 g_tracker_id = arc4random();
570#else
556 g_tracker_id = random(); 571 g_tracker_id = random();
557 noipv6=1; 572#endif
558 573
559 while( scanon ) { 574 while (scanon) {
560 switch( getopt( argc, argv, ":l:c:L:h" ) ) { 575 switch (getopt(argc, argv, ":l:c:L:h")) {
561 case -1: scanon = 0; break; 576 case -1:
577 scanon = 0;
578 break;
562 case 'l': 579 case 'l':
563 tmpport = 0; 580 tmpport = 0;
564 if( !scan_ip6_port( optarg, serverip, &tmpport ) || !tmpport ) { usage( argv[0] ); exit( 1 ); } 581 if (!scan_ip6_port(optarg, serverip, &tmpport) || !tmpport) {
565 ot_try_bind( serverip, tmpport ); 582 usage(argv[0]);
583 exit(1);
584 }
585 ot_try_bind(serverip, tmpport);
566 ++sbound; 586 ++sbound;
567 break; 587 break;
568 case 'c': 588 case 'c':
569 if( g_connection_count > MAX_PEERS / 2 ) exerr( "Connection limit exceeded.\n" ); 589 if (g_connection_count > MAX_PEERS / 2)
590 exerr("Connection limit exceeded.\n");
570 tmpport = 0; 591 tmpport = 0;
571 if( !scan_ip6_port( optarg, 592 if (!scan_ip6_port(optarg, g_connections[g_connection_count].ip, &g_connections[g_connection_count].port) || !g_connections[g_connection_count].port) {
572 g_connections[g_connection_count].ip, 593 usage(argv[0]);
573 &g_connections[g_connection_count].port ) || 594 exit(1);
574 !g_connections[g_connection_count].port ) { usage( argv[0] ); exit( 1 ); } 595 }
575 g_connections[g_connection_count++].state = FLAG_OUTGOING; 596 g_connections[g_connection_count++].state = FLAG_OUTGOING;
576 break; 597 break;
577 case 'L': 598 case 'L':
578 tmpport = 9696; 599 tmpport = 9696;
579 if( !scan_ip6_port( optarg, serverip, &tmpport ) || !tmpport ) { usage( argv[0] ); exit( 1 ); } 600 if (!scan_ip6_port(optarg, serverip, &tmpport) || !tmpport) {
580 livesync_bind_mcast( serverip, tmpport); ++lbound; break; 601 usage(argv[0]);
602 exit(1);
603 }
604 livesync_bind_mcast(serverip, tmpport);
605 ++lbound;
606 break;
581 default: 607 default:
582 case '?': usage( argv[0] ); exit( 1 ); 608 case '?':
609 usage(argv[0]);
610 exit(1);
583 } 611 }
584 } 612 }
585 613
586 if( !lbound ) exerr( "No livesync port bound." ); 614 if (!lbound)
587 if( !g_connection_count && !sbound ) exerr( "No streamsync port bound." ); 615 exerr("No livesync port bound.");
588 pthread_create( &sync_in_thread_id, NULL, livesync_worker, NULL ); 616 if (!g_connection_count && !sbound)
589 pthread_create( &sync_out_thread_id, NULL, streamsync_worker, NULL ); 617 exerr("No streamsync port bound.");
618 pthread_create(&sync_in_thread_id, NULL, livesync_worker, NULL);
619 pthread_create(&sync_out_thread_id, NULL, streamsync_worker, NULL);
590 620
591 server_mainloop(); 621 server_mainloop();
592 return 0; 622 return 0;
593} 623}
594 624
595static void * streamsync_worker( void * args ) { 625static void *streamsync_worker(void *args) {
596 (void)args; 626 (void)args;
597 while( 1 ) { 627 while (1) {
598 int bucket; 628 int bucket;
599 /* For each bucket... */ 629 /* For each bucket... */
600 for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 630 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
601 /* Get exclusive access to that bucket */ 631 /* Get exclusive access to that bucket */
602 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 632 ot_vector *torrents_list = mutex_bucket_lock(bucket);
603 size_t tor_offset, count_def = 0, count_one = 0, count_two = 0, count_peers = 0; 633 size_t tor_offset, count_def = 0, count_one = 0, count_two = 0, count_peers = 0;
604 size_t mem, mem_a = 0, mem_b = 0; 634 size_t mem, mem_a = 0, mem_b = 0;
605 uint8_t *ptr = 0, *ptr_a, *ptr_b, *ptr_c; 635 uint8_t *ptr = 0, *ptr_a, *ptr_b, *ptr_c;
606 636
607 if( !torrents_list->size ) goto unlock_continue; 637 if (!torrents_list->size)
638 goto unlock_continue;
608 639
609 /* For each torrent in this bucket.. */ 640 /* For each torrent in this bucket.. */
610 for( tor_offset=0; tor_offset<torrents_list->size; ++tor_offset ) { 641 for (tor_offset = 0; tor_offset < torrents_list->size; ++tor_offset) {
611 /* Address torrents members */ 642 /* Address torrents members */
612 ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[tor_offset] ).peer_list; 643 ot_peerlist *peer_list = (((ot_torrent *)(torrents_list->data))[tor_offset]).peer_list;
613 switch( peer_list->peer_count ) { 644 switch (peer_list->peer_count) {
614 case 2: count_two++; break; 645 case 2:
615 case 1: count_one++; break; 646 count_two++;
616 case 0: break; 647 break;
617 default: count_def++; 648 case 1:
618 count_peers += peer_list->peer_count; 649 count_one++;
650 break;
651 case 0:
652 break;
653 default:
654 count_def++;
655 count_peers += peer_list->peer_count;
619 } 656 }
620 } 657 }
621 658
622 /* Maximal memory requirement: max 3 blocks, max torrents * 20 + max peers * 7 */ 659 /* Maximal memory requirement: max 3 blocks, max torrents * 20 + max peers * 7 */
623 mem = 3 * ( 1 + 1 + 2 ) + ( count_one + count_two ) * ( 19 + 1 ) + count_def * ( 19 + 8 ) + 660 mem = 3 * (1 + 1 + 2) + (count_one + count_two) * (19 + 1) + count_def * (19 + 8) + (count_one + 2 * count_two + count_peers) * 7;
624 ( count_one + 2 * count_two + count_peers ) * 7; 661
625 662 fprintf(stderr, "Mem: %zd\n", mem);
626 fprintf( stderr, "Mem: %zd\n", mem ); 663
627 664 ptr = ptr_a = ptr_b = ptr_c = malloc(mem);
628 ptr = ptr_a = ptr_b = ptr_c = malloc( mem ); 665 if (!ptr)
629 if( !ptr ) goto unlock_continue; 666 goto unlock_continue;
630 667
631 if( count_one > 4 || !count_def ) { 668 if (count_one > 4 || !count_def) {
632 mem_a = 1 + 1 + 2 + count_one * ( 19 + 7 ); 669 mem_a = 1 + 1 + 2 + count_one * (19 + 7);
633 ptr_b += mem_a; ptr_c += mem_a; 670 ptr_b += mem_a;
634 ptr_a[0] = 1; /* Offset 0: packet type 1 */ 671 ptr_c += mem_a;
635 ptr_a[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */ 672 ptr_a[0] = 1; /* Offset 0: packet type 1 */
636 ptr_a[2] = count_one >> 8; 673 ptr_a[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */
637 ptr_a[3] = count_one & 255; 674 ptr_a[2] = count_one >> 8;
638 ptr_a += 4; 675 ptr_a[3] = count_one & 255;
676 ptr_a += 4;
639 } else 677 } else
640 count_def += count_one; 678 count_def += count_one;
641 679
642 if( count_two > 4 || !count_def ) { 680 if (count_two > 4 || !count_def) {
643 mem_b = 1 + 1 + 2 + count_two * ( 19 + 14 ); 681 mem_b = 1 + 1 + 2 + count_two * (19 + 14);
644 ptr_c += mem_b; 682 ptr_c += mem_b;
645 ptr_b[0] = 2; /* Offset 0: packet type 2 */ 683 ptr_b[0] = 2; /* Offset 0: packet type 2 */
646 ptr_b[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */ 684 ptr_b[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */
647 ptr_b[2] = count_two >> 8; 685 ptr_b[2] = count_two >> 8;
648 ptr_b[3] = count_two & 255; 686 ptr_b[3] = count_two & 255;
649 ptr_b += 4; 687 ptr_b += 4;
650 } else 688 } else
651 count_def += count_two; 689 count_def += count_two;
652 690
653 if( count_def ) { 691 if (count_def) {
654 ptr_c[0] = 0; /* Offset 0: packet type 0 */ 692 ptr_c[0] = 0; /* Offset 0: packet type 0 */
655 ptr_c[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */ 693 ptr_c[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */
656 ptr_c[2] = count_def >> 8; 694 ptr_c[2] = count_def >> 8;
657 ptr_c[3] = count_def & 255; 695 ptr_c[3] = count_def & 255;
658 ptr_c += 4; 696 ptr_c += 4;
659 } 697 }
660 698
661 /* For each torrent in this bucket.. */ 699 /* For each torrent in this bucket.. */
662 for( tor_offset=0; tor_offset<torrents_list->size; ++tor_offset ) { 700 for (tor_offset = 0; tor_offset < torrents_list->size; ++tor_offset) {
663 /* Address torrents members */ 701 /* Address torrents members */
664 ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + tor_offset; 702 ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + tor_offset;
665 ot_peerlist *peer_list = torrent->peer_list; 703 ot_peerlist *peer_list = torrent->peer_list;
666 ot_peer *peers = (ot_peer*)(peer_list->peers.data); 704 ot_peer *peers = (ot_peer *)(peer_list->peers.data);
667 uint8_t **dst; 705 uint8_t **dst;
668 706
669 /* Determine destination slot */ 707 /* Determine destination slot */
670 count_peers = peer_list->peer_count; 708 count_peers = peer_list->peer_count;
671 switch( count_peers ) { 709 switch (count_peers) {
672 case 0: continue; 710 case 0:
673 case 1: dst = mem_a ? &ptr_a : &ptr_c; break; 711 continue;
674 case 2: dst = mem_b ? &ptr_b : &ptr_c; break; 712 case 1:
675 default: dst = &ptr_c; break; 713 dst = mem_a ? &ptr_a : &ptr_c;
714 break;
715 case 2:
716 dst = mem_b ? &ptr_b : &ptr_c;
717 break;
718 default:
719 dst = &ptr_c;
720 break;
676 } 721 }
677 722
678 /* Copy tail of info_hash, advance pointer */ 723 /* Copy tail of info_hash, advance pointer */
679 memcpy( *dst, ((uint8_t*)torrent->hash) + 1, sizeof( ot_hash ) - 1); 724 memcpy(*dst, ((uint8_t *)torrent->hash) + 1, sizeof(ot_hash) - 1);
680 *dst += sizeof( ot_hash ) - 1; 725 *dst += sizeof(ot_hash) - 1;
681 726
682 /* Encode peer count */ 727 /* Encode peer count */
683 if( dst == &ptr_c ) 728 if (dst == &ptr_c)
684 while( count_peers ) { 729 while (count_peers) {
685 if( count_peers <= 0x7f ) 730 if (count_peers <= 0x7f)
686 *(*dst)++ = count_peers; 731 *(*dst)++ = count_peers;
687 else 732 else
688 *(*dst)++ = 0x80 | ( count_peers & 0x7f ); 733 *(*dst)++ = 0x80 | (count_peers & 0x7f);
689 count_peers >>= 7; 734 count_peers >>= 7;
690 } 735 }
691 736
692 /* Copy peers */ 737 /* Copy peers */
693 count_peers = peer_list->peer_count; 738 count_peers = peer_list->peer_count;
694 while( count_peers-- ) { 739 while (count_peers--) {
695 memcpy( *dst, peers++, OT_IP_SIZE + 3 ); 740 memcpy(*dst, peers++, OT_IP_SIZE + 3);
696 *dst += OT_IP_SIZE + 3; 741 *dst += OT_IP_SIZE + 3;
697 } 742 }
698 free_peerlist(peer_list); 743 free_peerlist(peer_list);
699 } 744 }
700 745
701 free( torrents_list->data ); 746 free(torrents_list->data);
702 memset( torrents_list, 0, sizeof(*torrents_list ) ); 747 memset(torrents_list, 0, sizeof(*torrents_list));
703unlock_continue: 748 unlock_continue:
704 mutex_bucket_unlock( bucket, 0 ); 749 mutex_bucket_unlock(bucket, 0);
705 750
706 if( ptr ) { 751 if (ptr) {
707 int i; 752 int i;
708 753
709 if( ptr_b > ptr_c ) ptr_c = ptr_b; 754 if (ptr_b > ptr_c)
710 if( ptr_a > ptr_c ) ptr_c = ptr_a; 755 ptr_c = ptr_b;
756 if (ptr_a > ptr_c)
757 ptr_c = ptr_a;
711 mem = ptr_c - ptr; 758 mem = ptr_c - ptr;
712 759
713 for( i=0; i < MAX_PEERS; ++i ) { 760 for (i = 0; i < MAX_PEERS; ++i) {
714 if( PROXYPEER_ISCONNECTED(g_connections[i].state) ) { 761 if (PROXYPEER_ISCONNECTED(g_connections[i].state)) {
715 void *tmp = malloc( mem ); 762 void *tmp = malloc(mem);
716 if( tmp ) { 763 if (tmp) {
717 memcpy( tmp, ptr, mem ); 764 memcpy(tmp, ptr, mem);
718 iob_addbuf_free( &g_connections[i].outdata, tmp, mem ); 765 iob_addbuf_free(&g_connections[i].outdata, tmp, mem);
719 io_wantwrite( g_connections[i].fd ); 766 io_wantwrite(g_connections[i].fd);
720 } 767 }
721 } 768 }
722 } 769 }
723 770
724 free( ptr ); 771 free(ptr);
725 } 772 }
726 usleep( OT_SYNC_SLEEP ); 773 usleep(OT_SYNC_SLEEP);
727 } 774 }
728 } 775 }
729 return 0; 776 return 0;
730} 777}
731 778
732static void livesync_issue_peersync( ) { 779static void livesync_issue_peersync() {
733 socket_send4(g_socket_out, (char*)g_peerbuffer_start, g_peerbuffer_pos - g_peerbuffer_start, 780 socket_send4(g_socket_out, (char *)g_peerbuffer_start, g_peerbuffer_pos - g_peerbuffer_start, groupip_1, LIVESYNC_PORT);
734 groupip_1, LIVESYNC_PORT); 781 g_peerbuffer_pos = g_peerbuffer_start + sizeof(g_tracker_id) + sizeof(uint32_t);
735 g_peerbuffer_pos = g_peerbuffer_start + sizeof( g_tracker_id ) + sizeof( uint32_t );
736 g_next_packet_time = time(NULL) + LIVESYNC_MAXDELAY; 782 g_next_packet_time = time(NULL) + LIVESYNC_MAXDELAY;
737} 783}
738 784
739void livesync_ticker( ) { 785void livesync_ticker() {
740 /* livesync_issue_peersync sets g_next_packet_time */ 786 /* livesync_issue_peersync sets g_next_packet_time */
741 if( time(NULL) > g_next_packet_time && 787 if (time(NULL) > g_next_packet_time && g_peerbuffer_pos > g_peerbuffer_start + sizeof(g_tracker_id))
742 g_peerbuffer_pos > g_peerbuffer_start + sizeof( g_tracker_id ) )
743 livesync_issue_peersync(); 788 livesync_issue_peersync();
744} 789}
745 790
746static void livesync_proxytell( uint8_t prefix, uint8_t *info_hash, uint8_t *peer ) { 791static void livesync_proxytell(uint8_t prefix, uint8_t *info_hash, uint8_t *peer) {
747// unsigned int i; 792 // unsigned int i;
748 793
749 *g_peerbuffer_pos = prefix; 794 *g_peerbuffer_pos = prefix;
750 memcpy( g_peerbuffer_pos + 1, info_hash, sizeof(ot_hash) - 1 ); 795 memcpy(g_peerbuffer_pos + 1, info_hash, sizeof(ot_hash) - 1);
751 memcpy( g_peerbuffer_pos + sizeof(ot_hash), peer, sizeof(ot_peer) - 1 ); 796 memcpy(g_peerbuffer_pos + sizeof(ot_hash), peer, sizeof(ot_peer) - 1);
752 797
753#if 0 798#if 0
754 /* Dump info_hash */ 799 /* Dump info_hash */
@@ -763,77 +808,84 @@ static void livesync_proxytell( uint8_t prefix, uint8_t *info_hash, uint8_t *pee
763#endif 808#endif
764 g_peerbuffer_pos += sizeof(ot_peer); 809 g_peerbuffer_pos += sizeof(ot_peer);
765 810
766 if( g_peerbuffer_pos >= g_peerbuffer_highwater ) 811 if (g_peerbuffer_pos >= g_peerbuffer_highwater)
767 livesync_issue_peersync(); 812 livesync_issue_peersync();
768} 813}
769 814
770static void process_indata( proxy_peer * peer ) { 815static void process_indata(proxy_peer *peer) {
771 size_t consumed, peers; 816 size_t consumed, peers;
772 uint8_t *data = peer->indata, *hash; 817 uint8_t *data = peer->indata, *hash;
773 uint8_t *dataend = data + peer->indata_length; 818 uint8_t *dataend = data + peer->indata_length;
774 819
775 while( 1 ) { 820 while (1) {
776 /* If we're not inside of a packet, make a new one */ 821 /* If we're not inside of a packet, make a new one */
777 if( !peer->packet_tcount ) { 822 if (!peer->packet_tcount) {
778 /* Ensure the header is complete or postpone processing */ 823 /* Ensure the header is complete or postpone processing */
779 if( data + 4 > dataend ) break; 824 if (data + 4 > dataend)
780 peer->packet_type = data[0]; 825 break;
781 peer->packet_tprefix = data[1]; 826 peer->packet_type = data[0];
782 peer->packet_tcount = data[2] * 256 + data[3]; 827 peer->packet_tprefix = data[1];
783 data += 4; 828 peer->packet_tcount = data[2] * 256 + data[3];
784printf( "type: %hhu, prefix: %02X, torrentcount: %zd\n", peer->packet_type, peer->packet_tprefix, peer->packet_tcount ); 829 data += 4;
830 printf("type: %hhu, prefix: %02X, torrentcount: %zd\n", peer->packet_type, peer->packet_tprefix, peer->packet_tcount);
785 } 831 }
786 832
787 /* Ensure size for a minimal torrent block */ 833 /* Ensure size for a minimal torrent block */
788 if( data + sizeof(ot_hash) + OT_IP_SIZE + 3 > dataend ) break; 834 if (data + sizeof(ot_hash) + OT_IP_SIZE + 3 > dataend)
835 break;
789 836
790 /* Advance pointer to peer count or peers */ 837 /* Advance pointer to peer count or peers */
791 hash = data; 838 hash = data;
792 data += sizeof(ot_hash) - 1; 839 data += sizeof(ot_hash) - 1;
793 840
794 /* Type 0 has peer count encoded before each peers */ 841 /* Type 0 has peer count encoded before each peers */
795 peers = peer->packet_type; 842 peers = peer->packet_type;
796 if( !peers ) { 843 if (!peers) {
797 int shift = 0; 844 int shift = 0;
798 do peers |= ( 0x7f & *data ) << ( 7 * shift ); 845 do
799 while ( *(data++) & 0x80 && shift++ < 6 ); 846 peers |= (0x7f & *data) << (7 * shift);
847 while (*(data++) & 0x80 && shift++ < 6);
800 } 848 }
801#if 0 849#if 0
802printf( "peers: %zd\n", peers ); 850printf( "peers: %zd\n", peers );
803#endif 851#endif
804 /* Ensure enough data being read to hold all peers */ 852 /* Ensure enough data being read to hold all peers */
805 if( data + (OT_IP_SIZE + 3) * peers > dataend ) { 853 if (data + (OT_IP_SIZE + 3) * peers > dataend) {
806 data = hash; 854 data = hash;
807 break; 855 break;
808 } 856 }
809 while( peers-- ) { 857 while (peers--) {
810 livesync_proxytell( peer->packet_tprefix, hash, data ); 858 livesync_proxytell(peer->packet_tprefix, hash, data);
811 data += OT_IP_SIZE + 3; 859 data += OT_IP_SIZE + 3;
812 } 860 }
813 --peer->packet_tcount; 861 --peer->packet_tcount;
814 } 862 }
815 863
816 consumed = data - peer->indata; 864 consumed = data - peer->indata;
817 memmove( peer->indata, data, peer->indata_length - consumed ); 865 memmove(peer->indata, data, peer->indata_length - consumed);
818 peer->indata_length -= consumed; 866 peer->indata_length -= consumed;
819} 867}
820 868
821static void * livesync_worker( void * args ) { 869static void *livesync_worker(void *args) {
822 (void)args; 870 (void)args;
823 while( 1 ) { 871 while (1) {
824 ot_ip6 in_ip; uint16_t in_port; 872 ot_ip6 in_ip;
825 size_t datalen = socket_recv4(g_socket_in, (char*)g_inbuffer, LIVESYNC_INCOMING_BUFFSIZE, 12+(char*)in_ip, &in_port); 873 uint16_t in_port;
874 size_t datalen = socket_recv4(g_socket_in, (char *)g_inbuffer, LIVESYNC_INCOMING_BUFFSIZE, 12 + (char *)in_ip, &in_port);
826 875
827 /* Expect at least tracker id and packet type */ 876 /* Expect at least tracker id and packet type */
828 if( datalen <= (ssize_t)(sizeof( g_tracker_id ) + sizeof( uint32_t )) ) 877 if (datalen <= (ssize_t)(sizeof(g_tracker_id) + sizeof(uint32_t)))
829 continue; 878 continue;
830 if( !memcmp( g_inbuffer, &g_tracker_id, sizeof( g_tracker_id ) ) ) { 879 if (!memcmp(g_inbuffer, &g_tracker_id, sizeof(g_tracker_id))) {
831 /* drop packet coming from ourselves */ 880 /* drop packet coming from ourselves */
832 continue; 881 continue;
833 } 882 }
834 switch( uint32_read_big( sizeof( g_tracker_id ) + (char*)g_inbuffer ) ) { 883 switch (uint32_read_big((char *)g_inbuffer + sizeof(g_tracker_id))) {
835 case OT_SYNC_PEER: 884 case OT_SYNC_PEER4:
836 livesync_handle_peersync( datalen ); 885 livesync_handle_peersync(datalen, OT_PEER_SIZE4);
886 break;
887 case OT_SYNC_PEER6:
888 livesync_handle_peersync(datalen, OT_PEER_SIZE6);
837 break; 889 break;
838 default: 890 default:
839 // fprintf( stderr, "Received an unknown live sync packet type %u.\n", uint32_read_big( sizeof( g_tracker_id ) + (char*)g_inbuffer ) ); 891 // fprintf( stderr, "Received an unknown live sync packet type %u.\n", uint32_read_big( sizeof( g_tracker_id ) + (char*)g_inbuffer ) );
diff --git a/scan_urlencoded_query.c b/scan_urlencoded_query.c
index a4f89c2..38d544a 100644
--- a/scan_urlencoded_query.c
+++ b/scan_urlencoded_query.c
@@ -45,37 +45,45 @@ static const unsigned char is_unreserved[256] = {
45 45
46/* Do a fast nibble to hex representation conversion */ 46/* Do a fast nibble to hex representation conversion */
47static unsigned char fromhex(unsigned char x) { 47static unsigned char fromhex(unsigned char x) {
48 x-='0'; if( x<=9) return x; 48 x -= '0';
49 x&=~0x20; x-='A'-'0'; 49 if (x <= 9)
50 if( x<6 ) return x+10; 50 return x;
51 x &= ~0x20;
52 x -= 'A' - '0';
53 if (x < 6)
54 return x + 10;
51 return 0xff; 55 return 0xff;
52} 56}
53 57
54/* Skip the value of a param=value pair */ 58/* Skip the value of a param=value pair */
55void scan_urlencoded_skipvalue( char **string ) { 59void scan_urlencoded_skipvalue(char **string) {
56 const unsigned char* s=*(const unsigned char**) string; 60 const unsigned char *s = *(const unsigned char **)string;
57 unsigned char f; 61 unsigned char f;
58 62
59 /* Since we are asked to skip the 'value', we assume to stop at 63 /* Since we are asked to skip the 'value', we assume to stop at
60 terminators for a 'value' string position */ 64 terminators for a 'value' string position */
61 while( ( f = is_unreserved[ *s++ ] ) & SCAN_SEARCHPATH_VALUE ); 65 while ((f = is_unreserved[*s++]) & SCAN_SEARCHPATH_VALUE)
66 ;
62 67
63 /* If we stopped at a hard terminator like \0 or \n, make the 68 /* If we stopped at a hard terminator like \0 or \n, make the
64 next scan_urlencoded_query encounter it again */ 69 next scan_urlencoded_query encounter it again */
65 if( f & SCAN_SEARCHPATH_TERMINATOR ) --s; 70 if (f & SCAN_SEARCHPATH_TERMINATOR)
71 --s;
66 72
67 *string = (char*)s; 73 *string = (char *)s;
68} 74}
69 75
70int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCHPATH_FLAG flags) { 76int scan_find_keywords(const ot_keywords *keywords, char **string, SCAN_SEARCHPATH_FLAG flags) {
71 char *deststring = *string; 77 char *deststring = *string;
72 ssize_t match_length = scan_urlencoded_query(string, deststring, flags ); 78 ssize_t match_length = scan_urlencoded_query(string, deststring, flags);
73 79
74 if( match_length < 0 ) return match_length; 80 if (match_length < 0)
75 if( match_length == 0 ) return -3; 81 return match_length;
82 if (match_length == 0)
83 return -3;
76 84
77 while( keywords->key ) { 85 while (keywords->key) {
78 if( !strncmp( keywords->key, deststring, match_length ) && !keywords->key[match_length] ) 86 if (!strncmp(keywords->key, deststring, match_length) && !keywords->key[match_length])
79 return keywords->value; 87 return keywords->value;
80 keywords++; 88 keywords++;
81 } 89 }
@@ -84,60 +92,73 @@ int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCH
84} 92}
85 93
86ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_FLAG flags) { 94ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_FLAG flags) {
87 const unsigned char* s=*(const unsigned char**) string; 95 const unsigned char *s = *(const unsigned char **)string;
88 unsigned char *d = (unsigned char*)deststring; 96 unsigned char *d = (unsigned char *)deststring;
89 unsigned char b, c; 97 unsigned char b, c;
90 98
91 /* This is the main decoding loop. 99 /* This is the main decoding loop.
92 'flag' determines, which characters are non-terminating in current context 100 'flag' determines, which characters are non-terminating in current context
93 (ie. stop at '=' and '&' if scanning for a 'param'; stop at '?' if scanning for the path ) 101 (ie. stop at '=' and '&' if scanning for a 'param'; stop at '?' if scanning for the path )
94 */ 102 */
95 while( is_unreserved[ c = *s++ ] & flags ) { 103 while (is_unreserved[c = *s++] & flags) {
96 104
97 /* When encountering an url escaped character, try to decode */ 105 /* When encountering an url escaped character, try to decode */
98 if( c=='%') { 106 if (c == '%') {
99 if( ( b = fromhex(*s++) ) == 0xff ) return -1; 107 if ((b = fromhex(*s++)) == 0xff)
100 if( ( c = fromhex(*s++) ) == 0xff ) return -1; 108 return -1;
101 c|=(b<<4); 109 if ((c = fromhex(*s++)) == 0xff)
110 return -1;
111 c |= (b << 4);
102 } 112 }
103 113
104 /* Write (possibly decoded) character to output */ 114 /* Write (possibly decoded) character to output */
105 *d++ = c; 115 *d++ = c;
106 } 116 }
107 117
108 switch( c ) { 118 switch (c) {
109 case 0: case '\r': case '\n': case ' ': 119 case 0:
120 case '\r':
121 case '\n':
122 case ' ':
110 /* If we started scanning on a hard terminator, indicate we've finished */ 123 /* If we started scanning on a hard terminator, indicate we've finished */
111 if( d == (unsigned char*)deststring ) return -2; 124 if (d == (unsigned char *)deststring)
125 return -2;
112 126
113 /* Else make the next call to scan_urlencoded_param encounter it again */ 127 /* Else make the next call to scan_urlencoded_param encounter it again */
114 --s; 128 --s;
115 break; 129 break;
116 case '?': 130 case '?':
117 if( flags != SCAN_PATH ) return -1; 131 if (flags != SCAN_PATH)
132 return -1;
118 break; 133 break;
119 case '=': 134 case '=':
120 if( flags != SCAN_SEARCHPATH_PARAM ) return -1; 135 if (flags != SCAN_SEARCHPATH_PARAM)
136 return -1;
121 break; 137 break;
122 case '&': 138 case '&':
123 if( flags == SCAN_PATH ) return -1; 139 if (flags == SCAN_PATH)
124 if( flags == SCAN_SEARCHPATH_PARAM ) --s; 140 return -1;
141 if (flags == SCAN_SEARCHPATH_PARAM)
142 --s;
125 break; 143 break;
126 default: 144 default:
127 return -1; 145 return -1;
128 } 146 }
129 147
130 *string = (char *)s; 148 *string = (char *)s;
131 return d - (unsigned char*)deststring; 149 return d - (unsigned char *)deststring;
132} 150}
133 151
134ssize_t scan_fixed_int( char *data, size_t len, int *tmp ) { 152ssize_t scan_fixed_int(char *data, size_t len, int *tmp) {
135 int minus = 0; 153 int minus = 0;
136 *tmp = 0; 154 *tmp = 0;
137 if( *data == '-' ) --len, ++data, ++minus; 155 if (*data == '-')
138 while( (len > 0) && (*data >= '0') && (*data <= '9') ) { --len; *tmp = 10**tmp + *data++-'0'; } 156 --len, ++data, ++minus;
139 if( minus ) *tmp = -*tmp; 157 while ((len > 0) && (*data >= '0') && (*data <= '9')) {
158 --len;
159 *tmp = 10 * *tmp + *data++ - '0';
160 }
161 if (minus)
162 *tmp = -*tmp;
140 return len; 163 return len;
141} 164}
142
143const char *g_version_scan_urlencoded_query_c = "$Source$: $Revision$\n";
diff --git a/scan_urlencoded_query.h b/scan_urlencoded_query.h
index 06b91f5..74246e7 100644
--- a/scan_urlencoded_query.h
+++ b/scan_urlencoded_query.h
@@ -38,18 +38,18 @@ ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_F
38 or -2 for terminator found 38 or -2 for terminator found
39 or -3 for no keyword matched 39 or -3 for no keyword matched
40 */ 40 */
41int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCHPATH_FLAG flags); 41int scan_find_keywords(const ot_keywords *keywords, char **string, SCAN_SEARCHPATH_FLAG flags);
42 42
43/* string in: pointer to value of a param=value pair to skip 43/* string in: pointer to value of a param=value pair to skip
44 out: pointer to next scan position on return 44 out: pointer to next scan position on return
45*/ 45*/
46void scan_urlencoded_skipvalue( char **string ); 46void scan_urlencoded_skipvalue(char **string);
47 47
48/* data pointer to len chars of string 48/* data pointer to len chars of string
49 len length of chars in data to parse 49 len length of chars in data to parse
50 number number to receive result 50 number number to receive result
51 returns number of bytes not parsed, mostly !=0 means fail 51 returns number of bytes not parsed, mostly !=0 means fail
52 */ 52 */
53ssize_t scan_fixed_int( char *data, size_t len, int *number ); 53ssize_t scan_fixed_int(char *data, size_t len, int *number);
54 54
55#endif 55#endif
diff --git a/tests/testsuite2.sh b/tests/testsuite2.sh
index c9a5a6a..da5181b 100644
--- a/tests/testsuite2.sh
+++ b/tests/testsuite2.sh
@@ -2,13 +2,21 @@
2 2
3while true; do 3while true; do
4 request_string="GET /announce?info_hash=012345678901234567\ 4 request_string="GET /announce?info_hash=012345678901234567\
5%$(printf %02X $(( $RANDOM & 0xff )) )\ 5$(printf %02X $(( $RANDOM & 0xff )) )\
6%$(printf %02X $(( $RANDOM & 0xff )) )\ 6&ip=$(( $RANDOM & 0xff )).17.13.15&port=$(( $RANDOM & 0xff )) HTTP/1.0"
7&ip=$(( $RANDOM & 0xff )).17.13.15&port=$(( $RANDOM & 0xff )) HTTP/1.0\n" 7
8 8# echo $request_string
9 echo $request_string 9# echo
10 echo 10 printf "%s\n\n" "$request_string" | nc 84.200.61.9 6969 | hexdump -C
11 echo $request_string | nc 23.23.23.237 6969 >/dev/null 11
12 echo 12 request_string="GET /announce?info_hash=012345678901234567\
13$(printf %02X $(( $RANDOM & 0xff )) )\
14&ip=2001:1608:6:27::$(( $RANDOM & 0xff ))&port=$(( $RANDOM & 0xff )) HTTP/1.0"
15 printf "%s\n\n" "$request_string" | nc 2001:1608:6:27::9 6969 | hexdump -C
16 printf "%s\n\n" "$request_string"
17
18 request_string="GET /scrape?info_hash=012345678901234567\
19$(printf %02X $(( $RANDOM & 0xff )) ) HTTP/1.0"
20 printf "%s\n\n" "$request_string" | nc 2001:1608:6:27::9 6969 | hexdump -C
13 21
14done 22done
diff --git a/trackerlogic.c b/trackerlogic.c
index 310466c..04df544 100644
--- a/trackerlogic.c
+++ b/trackerlogic.c
@@ -4,454 +4,595 @@
4 $id$ */ 4 $id$ */
5 5
6/* System */ 6/* System */
7#include <stdlib.h>
8#include <string.h>
9#include <stdio.h>
10#include <arpa/inet.h> 7#include <arpa/inet.h>
11#include <unistd.h>
12#include <errno.h> 8#include <errno.h>
13#include <stdint.h> 9#include <stdint.h>
10#include <stdio.h>
11#include <stdlib.h>
12#include <string.h>
13#include <unistd.h>
14 14
15/* Libowfat */ 15/* Libowfat */
16#include "array.h"
16#include "byte.h" 17#include "byte.h"
17#include "io.h" 18#include "io.h"
18#include "iob.h" 19#include "iob.h"
19#include "array.h" 20#include "ip6.h"
20 21
21/* Opentracker */ 22/* Opentracker */
22#include "trackerlogic.h"
23#include "ot_mutex.h"
24#include "ot_stats.h"
25#include "ot_clean.h"
26#include "ot_http.h"
27#include "ot_accesslist.h" 23#include "ot_accesslist.h"
24#include "ot_clean.h"
28#include "ot_fullscrape.h" 25#include "ot_fullscrape.h"
26#include "ot_http.h"
29#include "ot_livesync.h" 27#include "ot_livesync.h"
28#include "ot_mutex.h"
29#include "ot_stats.h"
30#include "ot_vector.h"
31#include "trackerlogic.h"
30 32
31/* Forward declaration */ 33/* Forward declaration */
32size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ); 34size_t return_peers_for_torrent(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto);
33 35
34void free_peerlist( ot_peerlist *peer_list ) { 36void free_peerlist(ot_peerlist *peer_list) {
35 if( peer_list->peers.data ) { 37 if (peer_list->peers.data) {
36 if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { 38 if (OT_PEERLIST_HASBUCKETS(peer_list))
37 ot_vector *bucket_list = (ot_vector*)(peer_list->peers.data); 39 vector_clean_list((ot_vector *)peer_list->peers.data, peer_list->peers.size);
38 40 else
39 while( peer_list->peers.size-- ) 41 free(peer_list->peers.data);
40 free( bucket_list++->data );
41 }
42 free( peer_list->peers.data );
43 } 42 }
44 free( peer_list ); 43 free(peer_list);
45} 44}
46 45
47void add_torrent_from_saved_state( ot_hash hash, ot_time base, size_t down_count ) { 46void add_torrent_from_saved_state(ot_hash const hash, ot_time base, size_t down_count) {
48 int exactmatch; 47 int exactmatch;
49 ot_torrent *torrent; 48 ot_torrent *torrent;
50 ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); 49 ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash);
50
51 if (!accesslist_hashisvalid(hash))
52 return mutex_bucket_unlock_by_hash(hash, 0);
51 53
52 if( !accesslist_hashisvalid( hash ) ) 54 torrent = vector_find_or_insert(torrents_list, (void *)hash, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
53 return mutex_bucket_unlock_by_hash( hash, 0 ); 55 if (!torrent || exactmatch)
54 56 return mutex_bucket_unlock_by_hash(hash, 0);
55 torrent = vector_find_or_insert( torrents_list, (void*)hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch );
56 if( !torrent || exactmatch )
57 return mutex_bucket_unlock_by_hash( hash, 0 );
58 57
59 /* Create a new torrent entry, then */ 58 /* Create a new torrent entry, then */
60 memcpy( torrent->hash, hash, sizeof(ot_hash) ); 59 byte_zero(torrent, sizeof(ot_torrent));
61 60 memcpy(torrent->hash, hash, sizeof(ot_hash));
62 if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) { 61
63 vector_remove_torrent( torrents_list, torrent ); 62 if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) {
64 return mutex_bucket_unlock_by_hash( hash, 0 ); 63 vector_remove_torrent(torrents_list, torrent);
64 return mutex_bucket_unlock_by_hash(hash, 0);
65 } 65 }
66
67 byte_zero( torrent->peer_list, sizeof( ot_peerlist ) );
68 torrent->peer_list->base = base;
69 torrent->peer_list->down_count = down_count;
70 66
71 return mutex_bucket_unlock_by_hash( hash, 1 ); 67 byte_zero(torrent->peer_list6, sizeof(ot_peerlist));
72} 68 byte_zero(torrent->peer_list4, sizeof(ot_peerlist));
69 torrent->peer_list6->base = base;
70 torrent->peer_list4->base = base;
71 torrent->peer_list6->down_count = down_count;
72 torrent->peer_list4->down_count = down_count;
73 73
74size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount ) { 74 return mutex_bucket_unlock_by_hash(hash, 1);
75 int exactmatch, delta_torrentcount = 0; 75}
76 ot_torrent *torrent;
77 ot_peer *peer_dest;
78 ot_vector *torrents_list = mutex_bucket_lock_by_hash( *ws->hash );
79 76
80 if( !accesslist_hashisvalid( *ws->hash ) ) { 77size_t add_peer_to_torrent_and_return_peers(PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount) {
81 mutex_bucket_unlock_by_hash( *ws->hash, 0 ); 78 int exactmatch, delta_torrentcount = 0;
82 if( proto == FLAG_TCP ) { 79 ot_torrent *torrent;
80 ot_peer *peer_dest;
81 ot_vector *torrents_list = mutex_bucket_lock_by_hash(*ws->hash);
82 ot_peerlist *peer_list;
83 size_t peer_size; /* initialized in next line */
84 ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size);
85
86 if (!accesslist_hashisvalid(*ws->hash)) {
87 mutex_bucket_unlock_by_hash(*ws->hash, 0);
88 if (proto == FLAG_TCP) {
83 const char invalid_hash[] = "d14:failure reason63:Requested download is not authorized for use with this tracker.e"; 89 const char invalid_hash[] = "d14:failure reason63:Requested download is not authorized for use with this tracker.e";
84 memcpy( ws->reply, invalid_hash, strlen( invalid_hash ) ); 90 memcpy(ws->reply, invalid_hash, strlen(invalid_hash));
85 return strlen( invalid_hash ); 91 return strlen(invalid_hash);
86 } 92 }
87 return 0; 93 return 0;
88 } 94 }
89 95
90 torrent = vector_find_or_insert( torrents_list, (void*)ws->hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); 96 torrent = vector_find_or_insert(torrents_list, (void *)ws->hash, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
91 if( !torrent ) { 97 if (!torrent) {
92 mutex_bucket_unlock_by_hash( *ws->hash, 0 ); 98 mutex_bucket_unlock_by_hash(*ws->hash, 0);
93 return 0; 99 return 0;
94 } 100 }
95 101
96 if( !exactmatch ) { 102 if (!exactmatch) {
97 /* Create a new torrent entry, then */ 103 /* Create a new torrent entry, then */
98 memcpy( torrent->hash, *ws->hash, sizeof(ot_hash) ); 104 byte_zero(torrent, sizeof(ot_torrent));
105 memcpy(torrent->hash, *ws->hash, sizeof(ot_hash));
99 106
100 if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) { 107 if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) {
101 vector_remove_torrent( torrents_list, torrent ); 108 vector_remove_torrent(torrents_list, torrent);
102 mutex_bucket_unlock_by_hash( *ws->hash, 0 ); 109 mutex_bucket_unlock_by_hash(*ws->hash, 0);
103 return 0; 110 return 0;
104 } 111 }
105 112
106 byte_zero( torrent->peer_list, sizeof( ot_peerlist ) ); 113 byte_zero(torrent->peer_list6, sizeof(ot_peerlist));
114 byte_zero(torrent->peer_list4, sizeof(ot_peerlist));
107 delta_torrentcount = 1; 115 delta_torrentcount = 1;
108 } else 116 } else
109 clean_single_torrent( torrent ); 117 clean_single_torrent(torrent);
118
119 torrent->peer_list6->base = g_now_minutes;
120 torrent->peer_list4->base = g_now_minutes;
110 121
111 torrent->peer_list->base = g_now_minutes; 122 peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
112 123
113 /* Check for peer in torrent */ 124 /* Check for peer in torrent */
114 peer_dest = vector_find_or_insert_peer( &(torrent->peer_list->peers), &ws->peer, &exactmatch ); 125 peer_dest = vector_find_or_insert_peer(&(peer_list->peers), peer_src, peer_size, &exactmatch);
115 if( !peer_dest ) { 126 if (!peer_dest) {
116 mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount ); 127 mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount);
117 return 0; 128 return 0;
118 } 129 }
119 130
120 /* Tell peer that it's fresh */ 131 /* Tell peer that it's fresh */
121 OT_PEERTIME( &ws->peer ) = 0; 132 OT_PEERTIME(ws->peer, OT_PEER_SIZE6) = 0;
122 133
123 /* Sanitize flags: Whoever claims to have completed download, must be a seeder */ 134 /* Sanitize flags: Whoever claims to have completed download, must be a seeder */
124 if( ( OT_PEERFLAG( &ws->peer ) & ( PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING ) ) == PEER_FLAG_COMPLETED ) 135 if ((OT_PEERFLAG(ws->peer) & (PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING)) == PEER_FLAG_COMPLETED)
125 OT_PEERFLAG( &ws->peer ) ^= PEER_FLAG_COMPLETED; 136 OT_PEERFLAG(ws->peer) ^= PEER_FLAG_COMPLETED;
126 137
127 /* If we hadn't had a match create peer there */ 138 /* If we hadn't had a match create peer there */
128 if( !exactmatch ) { 139 if (!exactmatch) {
129 140
130#ifdef WANT_SYNC_LIVE 141#ifdef WANT_SYNC_LIVE
131 if( proto == FLAG_MCA ) 142 if (proto == FLAG_MCA)
132 OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_FROM_SYNC; 143 OT_PEERFLAG(ws->peer) |= PEER_FLAG_FROM_SYNC;
133 else 144 else
134 livesync_tell( ws ); 145 livesync_tell(ws);
135#endif 146#endif
136 147
137 torrent->peer_list->peer_count++; 148 peer_list->peer_count++;
138 if( OT_PEERFLAG(&ws->peer) & PEER_FLAG_COMPLETED ) { 149 if (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED) {
139 torrent->peer_list->down_count++; 150 peer_list->down_count++;
140 stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)ws ); 151 stats_issue_event(EVENT_COMPLETED, 0, (uintptr_t)ws);
141 } 152 }
142 if( OT_PEERFLAG(&ws->peer) & PEER_FLAG_SEEDING ) 153 if (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING)
143 torrent->peer_list->seed_count++; 154 peer_list->seed_count++;
144 155
145 } else { 156 } else {
146 stats_issue_event( EVENT_RENEW, 0, OT_PEERTIME( peer_dest ) ); 157 stats_issue_event(EVENT_RENEW, 0, OT_PEERTIME(peer_dest, peer_size));
147#ifdef WANT_SPOT_WOODPECKER 158#ifdef WANT_SPOT_WOODPECKER
148 if( ( OT_PEERTIME(peer_dest) > 0 ) && ( OT_PEERTIME(peer_dest) < 20 ) ) 159 if ((OT_PEERTIME(peer_dest, peer_size) > 0) && (OT_PEERTIME(peer_dest, peer_size) < 20))
149 stats_issue_event( EVENT_WOODPECKER, 0, (uintptr_t)&ws->peer ); 160 stats_issue_event(EVENT_WOODPECKER, 0, (uintptr_t)&ws->peer);
150#endif 161#endif
151#ifdef WANT_SYNC_LIVE 162#ifdef WANT_SYNC_LIVE
152 /* Won't live sync peers that come back too fast. Only exception: 163 /* Won't live sync peers that come back too fast. Only exception:
153 fresh "completed" reports */ 164 fresh "completed" reports */
154 if( proto != FLAG_MCA ) { 165 if (proto != FLAG_MCA) {
155 if( OT_PEERTIME( peer_dest ) > OT_CLIENT_SYNC_RENEW_BOUNDARY || 166 if (OT_PEERTIME(peer_dest, peer_size) > OT_CLIENT_SYNC_RENEW_BOUNDARY ||
156 ( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(&ws->peer) & PEER_FLAG_COMPLETED ) ) ) 167 (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED)))
157 livesync_tell( ws ); 168 livesync_tell(ws);
158 } 169 }
159#endif 170#endif
160 171
161 if( (OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && !(OT_PEERFLAG(&ws->peer) & PEER_FLAG_SEEDING ) ) 172 if ((OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING) && !(OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING))
162 torrent->peer_list->seed_count--; 173 peer_list->seed_count--;
163 if( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && (OT_PEERFLAG(&ws->peer) & PEER_FLAG_SEEDING ) ) 174 if (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING))
164 torrent->peer_list->seed_count++; 175 peer_list->seed_count++;
165 if( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(&ws->peer) & PEER_FLAG_COMPLETED ) ) { 176 if (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED)) {
166 torrent->peer_list->down_count++; 177 peer_list->down_count++;
167 stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)ws ); 178 stats_issue_event(EVENT_COMPLETED, 0, (uintptr_t)ws);
168 } 179 }
169 if( OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) 180 if (OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED)
170 OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_COMPLETED; 181 OT_PEERFLAG(ws->peer) |= PEER_FLAG_COMPLETED;
171 } 182 }
172 183
173 memcpy( peer_dest, &ws->peer, sizeof(ot_peer) ); 184 memcpy(peer_dest, peer_src, peer_size);
174#ifdef WANT_SYNC 185#ifdef WANT_SYNC
175 if( proto == FLAG_MCA ) { 186 if (proto == FLAG_MCA) {
176 mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount ); 187 mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount);
177 return 0; 188 return 0;
178 } 189 }
179#endif 190#endif
180 191
181 ws->reply_size = return_peers_for_torrent( torrent, amount, ws->reply, proto ); 192 ws->reply_size = return_peers_for_torrent(ws, torrent, amount, ws->reply, proto);
182 mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount ); 193 mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount);
183 return ws->reply_size; 194 return ws->reply_size;
184} 195}
185 196
186static size_t return_peers_all( ot_peerlist *peer_list, char *reply ) { 197static size_t return_peers_all(ot_peerlist *peer_list, size_t peer_size, char *reply) {
187 unsigned int bucket, num_buckets = 1; 198 unsigned int bucket, num_buckets = 1;
188 ot_vector * bucket_list = &peer_list->peers; 199 ot_vector *bucket_list = &peer_list->peers;
189 size_t result = OT_PEER_COMPARE_SIZE * peer_list->peer_count; 200 size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
190 char * r_end = reply + result; 201 size_t result = compare_size * peer_list->peer_count;
202 char *r_end = reply + result;
191 203
192 if( OT_PEERLIST_HASBUCKETS(peer_list) ) { 204 if (OT_PEERLIST_HASBUCKETS(peer_list)) {
193 num_buckets = bucket_list->size; 205 num_buckets = bucket_list->size;
194 bucket_list = (ot_vector *)bucket_list->data; 206 bucket_list = (ot_vector *)bucket_list->data;
195 } 207 }
196 208
197 for( bucket = 0; bucket<num_buckets; ++bucket ) { 209 for (bucket = 0; bucket < num_buckets; ++bucket) {
198 ot_peer * peers = (ot_peer*)bucket_list[bucket].data; 210 ot_peer *peers = bucket_list[bucket].data;
199 size_t peer_count = bucket_list[bucket].size; 211 size_t peer_count = bucket_list[bucket].size;
200 while( peer_count-- ) { 212 while (peer_count--) {
201 if( OT_PEERFLAG(peers) & PEER_FLAG_SEEDING ) { 213 if (OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING) {
202 r_end-=OT_PEER_COMPARE_SIZE; 214 r_end -= compare_size;
203 memcpy(r_end,peers++,OT_PEER_COMPARE_SIZE); 215 memcpy(r_end, peers, compare_size);
204 } else { 216 } else {
205 memcpy(reply,peers++,OT_PEER_COMPARE_SIZE); 217 memcpy(reply, peers, compare_size);
206 reply+=OT_PEER_COMPARE_SIZE; 218 reply += compare_size;
207 } 219 }
220 peers += peer_size;
208 } 221 }
209 } 222 }
210 return result; 223 return result;
211} 224}
212 225
213static size_t return_peers_selection( ot_peerlist *peer_list, size_t amount, char *reply ) { 226static size_t return_peers_selection(struct ot_workstruct *ws, ot_peerlist *peer_list, size_t peer_size, size_t amount, char *reply) {
214 unsigned int bucket_offset, bucket_index = 0, num_buckets = 1; 227 unsigned int bucket_offset, bucket_index = 0, num_buckets = 1;
215 ot_vector * bucket_list = &peer_list->peers; 228 ot_vector *bucket_list = &peer_list->peers;
216 unsigned int shifted_pc = peer_list->peer_count; 229 unsigned int shifted_pc = peer_list->peer_count;
217 unsigned int shifted_step = 0; 230 unsigned int shifted_step = 0;
218 unsigned int shift = 0; 231 unsigned int shift = 0;
219 size_t result = OT_PEER_COMPARE_SIZE * amount; 232 size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
220 char * r_end = reply + result; 233 size_t result = compare_size * amount;
221 234 char *r_end = reply + result;
222 if( OT_PEERLIST_HASBUCKETS(peer_list) ) { 235
236 if (OT_PEERLIST_HASBUCKETS(peer_list)) {
223 num_buckets = bucket_list->size; 237 num_buckets = bucket_list->size;
224 bucket_list = (ot_vector *)bucket_list->data; 238 bucket_list = (ot_vector *)bucket_list->data;
225 } 239 }
226 240
227 /* Make fixpoint arithmetic as exact as possible */ 241 /* Make fixpoint arithmetic as exact as possible */
228#define MAXPRECBIT (1<<(8*sizeof(int)-3)) 242#define MAXPRECBIT (1 << (8 * sizeof(int) - 3))
229 while( !(shifted_pc & MAXPRECBIT ) ) { shifted_pc <<= 1; shift++; } 243 while (!(shifted_pc & MAXPRECBIT)) {
230 shifted_step = shifted_pc/amount; 244 shifted_pc <<= 1;
245 shift++;
246 }
247 shifted_step = shifted_pc / amount;
231#undef MAXPRECBIT 248#undef MAXPRECBIT
232 249
233 /* Initialize somewhere in the middle of peers so that 250 /* Initialize somewhere in the middle of peers so that
234 fixpoint's aliasing doesn't alway miss the same peers */ 251 fixpoint's aliasing doesn't alway miss the same peers */
235 bucket_offset = random() % peer_list->peer_count; 252 bucket_offset = nrand48(ws->rand48_state) % peer_list->peer_count;
236 253
237 while( amount-- ) { 254 while (amount--) {
238 ot_peer * peer; 255 ot_peer *peer;
239 256
240 /* This is the aliased, non shifted range, next value may fall into */ 257 /* This is the aliased, non shifted range, next value may fall into */
241 unsigned int diff = ( ( ( amount + 1 ) * shifted_step ) >> shift ) - 258 unsigned int diff = (((amount + 1) * shifted_step) >> shift) - ((amount * shifted_step) >> shift);
242 ( ( amount * shifted_step ) >> shift ); 259 bucket_offset += 1 + nrand48(ws->rand48_state) % diff;
243 bucket_offset += 1 + random() % diff;
244 260
245 while( bucket_offset >= bucket_list[bucket_index].size ) { 261 while (bucket_offset >= bucket_list[bucket_index].size) {
246 bucket_offset -= bucket_list[bucket_index].size; 262 bucket_offset -= bucket_list[bucket_index].size;
247 bucket_index = ( bucket_index + 1 ) % num_buckets; 263 bucket_index = (bucket_index + 1) % num_buckets;
248 } 264 }
249 peer = ((ot_peer*)bucket_list[bucket_index].data) + bucket_offset; 265 peer = bucket_list[bucket_index].data + peer_size * bucket_offset;
250 if( OT_PEERFLAG(peer) & PEER_FLAG_SEEDING ) { 266 if (OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_SEEDING) {
251 r_end-=OT_PEER_COMPARE_SIZE; 267 r_end -= compare_size;
252 memcpy(r_end,peer,OT_PEER_COMPARE_SIZE); 268 memcpy(r_end, peer, compare_size);
253 } else { 269 } else {
254 memcpy(reply,peer,OT_PEER_COMPARE_SIZE); 270 memcpy(reply, peer, compare_size);
255 reply+=OT_PEER_COMPARE_SIZE; 271 reply += compare_size;
256 } 272 }
257 } 273 }
258 return result; 274 return result;
259} 275}
260 276
261/* Compiles a list of random peers for a torrent 277static size_t return_peers_for_torrent_udp(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply) {
262 * reply must have enough space to hold 92+6*amount bytes 278 char *r = reply;
263 * does not yet check not to return self 279 size_t peer_size = peer_size_from_peer6(&ws->peer);
264*/ 280 ot_peerlist *peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
265size_t return_peers_for_torrent( ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ) { 281 size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
266 ot_peerlist *peer_list = torrent->peer_list; 282 size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
267 char *r = reply; 283
268 284 if (amount > peer_list->peer_count)
269 if( amount > peer_list->peer_count )
270 amount = peer_list->peer_count; 285 amount = peer_list->peer_count;
271 286
272 if( proto == FLAG_TCP ) { 287 *(uint32_t *)(r + 0) = htonl(OT_CLIENT_REQUEST_INTERVAL_RANDOM);
273 int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; 288 *(uint32_t *)(r + 4) = htonl(peer_count - seed_count);
274 r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie" PEERS_BENCODED "%zd:", peer_list->seed_count, peer_list->down_count, peer_list->peer_count-peer_list->seed_count, erval, erval/2, OT_PEER_COMPARE_SIZE*amount ); 289 *(uint32_t *)(r + 8) = htonl(seed_count);
275 } else { 290 r += 12;
276 *(uint32_t*)(r+0) = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM ); 291
277 *(uint32_t*)(r+4) = htonl( peer_list->peer_count - peer_list->seed_count ); 292 if (amount) {
278 *(uint32_t*)(r+8) = htonl( peer_list->seed_count ); 293 if (amount == peer_list->peer_count)
279 r += 12; 294 r += return_peers_all(peer_list, peer_size, r);
295 else
296 r += return_peers_selection(ws, peer_list, peer_size, amount, r);
280 } 297 }
298 return r - reply;
299}
281 300
282 if( amount ) { 301static size_t return_peers_for_torrent_tcp(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply) {
283 if( amount == peer_list->peer_count ) 302 char *r = reply;
284 r += return_peers_all( peer_list, r ); 303 int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM;
304 size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
305 size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count;
306 size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - seed_count;
307
308 /* Simple case: amount of peers in both lists is less than requested, here we return all results */
309 size_t amount_v4 = torrent->peer_list4->peer_count;
310 size_t amount_v6 = torrent->peer_list6->peer_count;
311
312 /* Complex case: both lists have more than enough entries and we need to split between v4 and v6 clients */
313 if (amount_v4 + amount_v6 > amount) {
314 size_t amount_left, percent_v6 = 0, percent_v4 = 0, left_v6, left_v4;
315 const size_t SCALE = 1024;
316
317 /* If possible, fill at least a quarter of peer from each family */
318 if (amount / 4 <= amount_v4)
319 amount_v4 = amount / 4;
320 if (amount / 4 <= amount_v6)
321 amount_v6 = amount / 4;
322
323 /* Fill the rest according to which family's pool provides more peers */
324 amount_left = amount - (amount_v4 + amount_v6);
325
326 left_v4 = torrent->peer_list4->peer_count - amount_v4;
327 left_v6 = torrent->peer_list6->peer_count - amount_v6;
328
329 if (left_v4 + left_v6) {
330 percent_v4 = (SCALE * left_v4) / (left_v4 + left_v6);
331 percent_v6 = (SCALE * left_v6) / (left_v4 + left_v6);
332 }
333
334 amount_v4 += (amount_left * percent_v4) / SCALE;
335 amount_v6 += (amount_left * percent_v6) / SCALE;
336
337 /* Integer division rounding can leave out a peer */
338 if (amount_v4 + amount_v6 < amount && amount_v6 < torrent->peer_list6->peer_count)
339 ++amount_v6;
340 if (amount_v4 + amount_v6 < amount && amount_v4 < torrent->peer_list4->peer_count)
341 ++amount_v4;
342 }
343
344 r +=
345 sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie", seed_count, down_count, peer_count, erval, erval / 2);
346
347 if (amount_v4) {
348 r += sprintf(r, PEERS_BENCODED4 "%zd:", OT_PEER_COMPARE_SIZE4 * amount_v4);
349 if (amount_v4 == torrent->peer_list4->peer_count)
350 r += return_peers_all(torrent->peer_list4, OT_PEER_SIZE4, r);
351 else
352 r += return_peers_selection(ws, torrent->peer_list4, OT_PEER_SIZE4, amount_v4, r);
353 }
354
355 if (amount_v6) {
356 r += sprintf(r, PEERS_BENCODED6 "%zd:", OT_PEER_COMPARE_SIZE6 * amount_v6);
357 if (amount_v6 == torrent->peer_list6->peer_count)
358 r += return_peers_all(torrent->peer_list6, OT_PEER_SIZE6, r);
285 else 359 else
286 r += return_peers_selection( peer_list, amount, r ); 360 r += return_peers_selection(ws, torrent->peer_list6, OT_PEER_SIZE6, amount_v6, r);
287 } 361 }
288 362
289 if( proto == FLAG_TCP ) 363 *r++ = 'e';
290 *r++ = 'e';
291 364
292 return r - reply; 365 return r - reply;
293} 366}
294 367
368/* Compiles a list of random peers for a torrent
369 * Reply must have enough space to hold:
370 * 92 + 6 * amount bytes for TCP/IPv4
371 * 92 + 18 * amount bytes for TCP/IPv6
372 * 12 + 6 * amount bytes for UDP/IPv4
373 * 12 + 18 * amount bytes for UDP/IPv6
374 * Does not yet check not to return self
375 */
376size_t return_peers_for_torrent(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto) {
377 return proto == FLAG_TCP ? return_peers_for_torrent_tcp(ws, torrent, amount, reply) : return_peers_for_torrent_udp(ws, torrent, amount, reply);
378}
379
295/* Fetches scrape info for a specific torrent */ 380/* Fetches scrape info for a specific torrent */
296size_t return_udp_scrape_for_torrent( ot_hash hash, char *reply ) { 381size_t return_udp_scrape_for_torrent(ot_hash const hash, char *reply) {
297 int exactmatch, delta_torrentcount = 0; 382 int exactmatch, delta_torrentcount = 0;
298 ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); 383 ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash);
299 ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); 384 ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
300 385
301 if( !exactmatch ) { 386 if (!exactmatch) {
302 memset( reply, 0, 12); 387 memset(reply, 0, 12);
303 } else { 388 } else {
304 uint32_t *r = (uint32_t*) reply; 389 uint32_t *r = (uint32_t *)reply;
305 390
306 if( clean_single_torrent( torrent ) ) { 391 if (clean_single_torrent(torrent)) {
307 vector_remove_torrent( torrents_list, torrent ); 392 vector_remove_torrent(torrents_list, torrent);
308 memset( reply, 0, 12); 393 memset(reply, 0, 12);
309 delta_torrentcount = -1; 394 delta_torrentcount = -1;
310 } else { 395 } else {
311 r[0] = htonl( torrent->peer_list->seed_count ); 396 r[0] = htonl(torrent->peer_list6->seed_count + torrent->peer_list4->seed_count);
312 r[1] = htonl( torrent->peer_list->down_count ); 397 r[1] = htonl(torrent->peer_list6->down_count + torrent->peer_list4->down_count);
313 r[2] = htonl( torrent->peer_list->peer_count-torrent->peer_list->seed_count ); 398 r[2] = htonl(torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - torrent->peer_list6->seed_count - torrent->peer_list4->seed_count);
314 } 399 }
315 } 400 }
316 mutex_bucket_unlock_by_hash( hash, delta_torrentcount ); 401 mutex_bucket_unlock_by_hash(hash, delta_torrentcount);
317 return 12; 402 return 12;
318} 403}
319 404
320/* Fetches scrape info for a specific torrent */ 405/* Fetches scrape info for a specific torrent */
321size_t return_tcp_scrape_for_torrent( ot_hash *hash_list, int amount, char *reply ) { 406size_t return_tcp_scrape_for_torrent(ot_hash const *hash_list, int amount, char *reply) {
322 char *r = reply; 407 char *r = reply;
323 int exactmatch, i; 408 int exactmatch, i;
324 409
325 r += sprintf( r, "d5:filesd" ); 410 r += sprintf(r, "d5:filesd");
326 411
327 for( i=0; i<amount; ++i ) { 412 for (i = 0; i < amount; ++i) {
328 int delta_torrentcount = 0; 413 int delta_torrentcount = 0;
329 ot_hash *hash = hash_list + i; 414 ot_hash const *hash = hash_list + i;
330 ot_vector *torrents_list = mutex_bucket_lock_by_hash( *hash ); 415 ot_vector *torrents_list = mutex_bucket_lock_by_hash(*hash);
331 ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); 416 ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
332 417
333 if( exactmatch ) { 418 if (exactmatch) {
334 if( clean_single_torrent( torrent ) ) { 419 if (clean_single_torrent(torrent)) {
335 vector_remove_torrent( torrents_list, torrent ); 420 vector_remove_torrent(torrents_list, torrent);
336 delta_torrentcount = -1; 421 delta_torrentcount = -1;
337 } else { 422 } else {
338 *r++='2';*r++='0';*r++=':'; 423 *r++ = '2';
339 memcpy( r, hash, sizeof(ot_hash) ); r+=sizeof(ot_hash); 424 *r++ = '0';
340 r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", 425 *r++ = ':';
341 torrent->peer_list->seed_count, torrent->peer_list->down_count, torrent->peer_list->peer_count-torrent->peer_list->seed_count ); 426 memcpy(r, hash, sizeof(ot_hash));
427 r += sizeof(ot_hash);
428 r += sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", torrent->peer_list6->seed_count + torrent->peer_list4->seed_count,
429 torrent->peer_list6->down_count + torrent->peer_list4->down_count,
430 torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - torrent->peer_list6->seed_count - torrent->peer_list4->seed_count);
342 } 431 }
343 } 432 }
344 mutex_bucket_unlock_by_hash( *hash, delta_torrentcount ); 433 mutex_bucket_unlock_by_hash(*hash, delta_torrentcount);
345 } 434 }
346 435
347 *r++ = 'e'; *r++ = 'e'; 436 *r++ = 'e';
437 *r++ = 'e';
348 return r - reply; 438 return r - reply;
349} 439}
350 440
351static ot_peerlist dummy_list; 441static ot_peerlist dummy_list;
352size_t remove_peer_from_torrent( PROTO_FLAG proto, struct ot_workstruct *ws ) { 442size_t remove_peer_from_torrent(PROTO_FLAG proto, struct ot_workstruct *ws) {
353 int exactmatch; 443 int exactmatch;
354 ot_vector *torrents_list = mutex_bucket_lock_by_hash( *ws->hash ); 444 ot_vector *torrents_list = mutex_bucket_lock_by_hash(*ws->hash);
355 ot_torrent *torrent = binary_search( ws->hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); 445 ot_torrent *torrent = binary_search(ws->hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
356 ot_peerlist *peer_list = &dummy_list; 446 ot_peerlist *peer_list = &dummy_list;
447 size_t peer_size; /* initialized in next line */
448 ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size);
449 size_t peer_count = 0, seed_count = 0;
357 450
358#ifdef WANT_SYNC_LIVE 451#ifdef WANT_SYNC_LIVE
359 if( proto != FLAG_MCA ) { 452 if (proto != FLAG_MCA) {
360 OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_STOPPED; 453 OT_PEERFLAG(ws->peer) |= PEER_FLAG_STOPPED;
361 livesync_tell( ws ); 454 livesync_tell(ws);
362 } 455 }
363#endif 456#endif
364 457
365 if( exactmatch ) { 458 if (exactmatch) {
366 peer_list = torrent->peer_list; 459 peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
367 switch( vector_remove_peer( &peer_list->peers, &ws->peer ) ) { 460 switch (vector_remove_peer(&peer_list->peers, peer_src, peer_size)) {
368 case 2: peer_list->seed_count--; /* Fall throughs intended */ 461 case 2:
369 case 1: peer_list->peer_count--; /* Fall throughs intended */ 462 peer_list->seed_count--; /* Intentional fallthrough */
370 default: break; 463 case 1:
464 peer_list->peer_count--; /* Intentional fallthrough */
465 default:
466 break;
371 } 467 }
468
469 peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
470 seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
372 } 471 }
373 472
374 if( proto == FLAG_TCP ) { 473 if (proto == FLAG_TCP) {
375 int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; 474 int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM;
376 ws->reply_size = sprintf( ws->reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie" PEERS_BENCODED "0:e", peer_list->seed_count, peer_list->peer_count - peer_list->seed_count, erval, erval / 2 ); 475 ws->reply_size = sprintf(ws->reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie%s0:e", seed_count, peer_count - seed_count, erval,
476 erval / 2, peer_size == OT_PEER_SIZE6 ? PEERS_BENCODED6 : PEERS_BENCODED4);
377 } 477 }
378 478
379 /* Handle UDP reply */ 479 /* Handle UDP reply */
380 if( proto == FLAG_UDP ) { 480 if (proto == FLAG_UDP) {
381 ((uint32_t*)ws->reply)[2] = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM ); 481 ((uint32_t *)ws->reply)[2] = htonl(OT_CLIENT_REQUEST_INTERVAL_RANDOM);
382 ((uint32_t*)ws->reply)[3] = htonl( peer_list->peer_count - peer_list->seed_count ); 482 ((uint32_t *)ws->reply)[3] = htonl(peer_count - seed_count);
383 ((uint32_t*)ws->reply)[4] = htonl( peer_list->seed_count); 483 ((uint32_t *)ws->reply)[4] = htonl(seed_count);
384 ws->reply_size = 20; 484 ws->reply_size = 20;
385 } 485 }
386 486
387 mutex_bucket_unlock_by_hash( *ws->hash, 0 ); 487 mutex_bucket_unlock_by_hash(*ws->hash, 0);
388 return ws->reply_size; 488 return ws->reply_size;
389} 489}
390 490
391void iterate_all_torrents( int (*for_each)( ot_torrent* torrent, uintptr_t data ), uintptr_t data ) { 491void iterate_all_torrents(int (*for_each)(ot_torrent *torrent, uintptr_t data), uintptr_t data) {
392 int bucket; 492 int bucket;
393 size_t j; 493 size_t j;
394 494
395 for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 495 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
396 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 496 ot_vector *torrents_list = mutex_bucket_lock(bucket);
397 ot_torrent *torrents = (ot_torrent*)(torrents_list->data); 497 ot_torrent *torrents = (ot_torrent *)(torrents_list->data);
398 498
399 for( j=0; j<torrents_list->size; ++j ) 499 for (j = 0; j < torrents_list->size; ++j)
400 if( for_each( torrents + j, data ) ) 500 if (for_each(torrents + j, data))
401 break; 501 break;
402 502
403 mutex_bucket_unlock( bucket, 0 ); 503 mutex_bucket_unlock(bucket, 0);
404 if( !g_opentracker_running ) return; 504 if (!g_opentracker_running)
505 return;
506 }
507}
508
509ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size) {
510 ot_ip6 *ip = (ot_ip6 *)peer;
511 if (!ip6_isv4mapped(ip)) {
512 *peer_size = OT_PEER_SIZE6;
513 return (ot_peer *)peer;
405 } 514 }
515 *peer_size = OT_PEER_SIZE4;
516 return (ot_peer *)(((uint8_t *)peer) + 12);
406} 517}
407 518
408void exerr( char * message ) { 519size_t peer_size_from_peer6(ot_peer6 *peer) {
409 fprintf( stderr, "%s\n", message ); 520 ot_ip6 *ip = (ot_ip6 *)peer;
410 exit( 111 ); 521 if (!ip6_isv4mapped(ip))
522 return OT_PEER_SIZE6;
523 return OT_PEER_SIZE4;
411} 524}
412 525
413void trackerlogic_init( ) { 526#ifdef _DEBUG_RANDOMTORRENTS
527void trackerlogic_add_random_torrents(size_t amount) {
528 struct ot_workstruct ws;
529 memset(&ws, 0, sizeof(ws));
530
531 ws.inbuf = malloc(G_INBUF_SIZE);
532 ws.outbuf = malloc(G_OUTBUF_SIZE);
533 ws.reply = ws.outbuf;
534 ws.hash = (ot_hash *)ws.inbuf;
535
536 while (amount--) {
537 arc4random_buf(ws.hash, sizeof(ot_hash));
538 arc4random_buf(&ws.peer, sizeof(ws.peer));
539
540 OT_PEERFLAG(ws.peer) &= PEER_FLAG_SEEDING | PEER_FLAG_COMPLETED | PEER_FLAG_STOPPED;
541
542 add_peer_to_torrent_and_return_peers(FLAG_TCP, &ws, 1);
543 }
544
545 free(ws.inbuf);
546 free(ws.outbuf);
547}
548#endif
549
550void exerr(char *message) {
551 fprintf(stderr, "%s\n", message);
552 exit(111);
553}
554
555void trackerlogic_init() {
414 g_tracker_id = random(); 556 g_tracker_id = random();
415 557
416 if( !g_stats_path ) 558 if (!g_stats_path)
417 g_stats_path = "stats"; 559 g_stats_path = "stats";
418 g_stats_path_len = strlen( g_stats_path ); 560 g_stats_path_len = strlen(g_stats_path);
419 561
420 /* Initialise background worker threads */ 562 /* Initialise background worker threads */
421 mutex_init( ); 563 mutex_init();
422 clean_init( ); 564 clean_init();
423 fullscrape_init( ); 565 fullscrape_init();
424 accesslist_init( ); 566 accesslist_init();
425 livesync_init( ); 567 livesync_init();
426 stats_init( ); 568 stats_init();
427} 569}
428 570
429void trackerlogic_deinit( void ) { 571void trackerlogic_deinit(void) {
430 int bucket, delta_torrentcount = 0; 572 int bucket, delta_torrentcount = 0;
431 size_t j; 573 size_t j;
432 574
433 /* Free all torrents... */ 575 /* Free all torrents... */
434 for(bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { 576 for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
435 ot_vector *torrents_list = mutex_bucket_lock( bucket ); 577 ot_vector *torrents_list = mutex_bucket_lock(bucket);
436 if( torrents_list->size ) { 578 if (torrents_list->size) {
437 for( j=0; j<torrents_list->size; ++j ) { 579 for (j = 0; j < torrents_list->size; ++j) {
438 ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + j; 580 ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + j;
439 free_peerlist( torrent->peer_list ); 581 free_peerlist(torrent->peer_list6);
582 free_peerlist(torrent->peer_list4);
440 delta_torrentcount -= 1; 583 delta_torrentcount -= 1;
441 } 584 }
442 free( torrents_list->data ); 585 free(torrents_list->data);
443 } 586 }
444 mutex_bucket_unlock( bucket, delta_torrentcount ); 587 mutex_bucket_unlock(bucket, delta_torrentcount);
445 } 588 }
446 589
447 /* Deinitialise background worker threads */ 590 /* Deinitialise background worker threads */
448 stats_deinit( ); 591 stats_deinit();
449 livesync_deinit( ); 592 livesync_deinit();
450 accesslist_deinit( ); 593 accesslist_deinit();
451 fullscrape_deinit( ); 594 fullscrape_deinit();
452 clean_deinit( ); 595 clean_deinit();
453 /* Release mutexes */ 596 /* Release mutexes */
454 mutex_deinit( ); 597 mutex_deinit();
455} 598}
456
457const char *g_version_trackerlogic_c = "$Source$: $Revision$\n";
diff --git a/trackerlogic.h b/trackerlogic.h
index 721ba6e..022184d 100644
--- a/trackerlogic.h
+++ b/trackerlogic.h
@@ -6,131 +6,166 @@
6#ifndef OT_TRACKERLOGIC_H__ 6#ifndef OT_TRACKERLOGIC_H__
7#define OT_TRACKERLOGIC_H__ 7#define OT_TRACKERLOGIC_H__
8 8
9#include <sys/types.h> 9#include <stdint.h>
10#include <stdlib.h>
10#include <sys/time.h> 11#include <sys/time.h>
12#include <sys/types.h>
11#include <time.h> 13#include <time.h>
12#include <stdint.h> 14
15#if defined(__linux__) && defined(WANT_ARC4RANDOM)
16#include <bsd/stdlib.h>
17#endif
18#ifdef __FreeBSD__
19#define WANT_ARC4RANDOM
20#endif
13 21
14typedef uint8_t ot_hash[20]; 22typedef uint8_t ot_hash[20];
15typedef time_t ot_time; 23typedef time_t ot_time;
16typedef char ot_ip6[16]; 24typedef char ot_ip6[16];
17typedef struct { ot_ip6 address; int bits; } 25typedef struct {
18 ot_net; 26 ot_ip6 address;
19#ifdef WANT_V6 27 int bits;
20#define OT_IP_SIZE 16 28} ot_net;
21#define PEERS_BENCODED "6:peers6" 29/* List of peers should fit in a single UDP packet (around 1200 bytes) */
22#else 30#define OT_MAX_PEERS_UDP6 66
23#define OT_IP_SIZE 4 31#define OT_MAX_PEERS_UDP4 200
24#define PEERS_BENCODED "5:peers" 32
25#endif 33#define OT_IP_SIZE6 16
34#define OT_IP_SIZE4 4
35#define OT_PORT_SIZE 2
36#define OT_FLAG_SIZE 1
37#define OT_TIME_SIZE 1
26 38
27/* Some tracker behaviour tunable */ 39/* Some tracker behaviour tunable */
28#define OT_CLIENT_TIMEOUT 30 40#define OT_CLIENT_TIMEOUT 30
29#define OT_CLIENT_TIMEOUT_CHECKINTERVAL 10 41#define OT_CLIENT_TIMEOUT_CHECKINTERVAL 10
30#define OT_CLIENT_TIMEOUT_SEND (60*15) 42#define OT_CLIENT_TIMEOUT_SEND (60 * 15)
31#define OT_CLIENT_REQUEST_INTERVAL (60*30) 43#define OT_CLIENT_REQUEST_INTERVAL (60 * 30)
32#define OT_CLIENT_REQUEST_VARIATION (60*6) 44#define OT_CLIENT_REQUEST_VARIATION (60 * 6)
33 45
34#define OT_TORRENT_TIMEOUT_HOURS 24 46#define OT_TORRENT_TIMEOUT_HOURS 24
35#define OT_TORRENT_TIMEOUT (60*OT_TORRENT_TIMEOUT_HOURS) 47#define OT_TORRENT_TIMEOUT (60 * OT_TORRENT_TIMEOUT_HOURS)
36 48
37#define OT_CLIENT_REQUEST_INTERVAL_RANDOM ( OT_CLIENT_REQUEST_INTERVAL - OT_CLIENT_REQUEST_VARIATION/2 + (int)( random( ) % OT_CLIENT_REQUEST_VARIATION ) ) 49#define OT_CLIENT_REQUEST_INTERVAL_RANDOM \
50 (OT_CLIENT_REQUEST_INTERVAL - OT_CLIENT_REQUEST_VARIATION / 2 + (int)(nrand48(ws->rand48_state) % OT_CLIENT_REQUEST_VARIATION))
38 51
39/* If WANT_MODEST_FULLSCRAPES is on, ip addresses may not 52/* If WANT_MODEST_FULLSCRAPES is on, ip addresses may not
40 fullscrape more frequently than this amount in seconds */ 53 fullscrape more frequently than this amount in seconds */
41#define OT_MODEST_PEER_TIMEOUT (60*5) 54#define OT_MODEST_PEER_TIMEOUT (60 * 5)
42 55
43/* If peers come back before 10 minutes, don't live sync them */ 56/* If peers come back before 10 minutes, don't live sync them */
44#define OT_CLIENT_SYNC_RENEW_BOUNDARY 10 57#define OT_CLIENT_SYNC_RENEW_BOUNDARY 10
45 58
46/* Number of tracker admin ip addresses allowed */ 59/* Number of tracker admin ip addresses allowed */
47#define OT_ADMINIP_MAX 64 60#define OT_ADMINIP_MAX 64
48#define OT_MAX_THREADS 64 61#define OT_MAX_THREADS 64
49 62
50#define OT_PEER_TIMEOUT 45 63/* Number of minutes after announce before peer is removed */
64#define OT_PEER_TIMEOUT 45
51 65
52/* We maintain a list of 1024 pointers to sorted list of ot_torrent structs 66/* We maintain a list of 1024 pointers to sorted list of ot_torrent structs
53 Sort key is, of course, its hash */ 67 Sort key is, of course, its hash */
54#define OT_BUCKET_COUNT_BITS 10 68#define OT_BUCKET_COUNT_BITS 10
69
70#define OT_BUCKET_COUNT (1 << OT_BUCKET_COUNT_BITS)
71#define OT_BUCKET_COUNT_SHIFT (32 - OT_BUCKET_COUNT_BITS)
55 72
56#define OT_BUCKET_COUNT (1<<OT_BUCKET_COUNT_BITS) 73/* if _DEBUG_RANDOMTORRENTS is set, this is the amount of torrents to create
57#define OT_BUCKET_COUNT_SHIFT (32-OT_BUCKET_COUNT_BITS) 74 on startup */
75#define RANDOMTORRENTS (1024 * 1024 * 1)
58 76
59/* From opentracker.c */ 77/* From opentracker.c */
60extern time_t g_now_seconds; 78extern time_t g_now_seconds;
61extern volatile int g_opentracker_running; 79extern volatile int g_opentracker_running;
62#define g_now_minutes (g_now_seconds/60) 80#define g_now_minutes (g_now_seconds / 60)
63 81
64extern uint32_t g_tracker_id; 82extern uint32_t g_tracker_id;
65typedef enum { FLAG_TCP, FLAG_UDP, FLAG_MCA, FLAG_SELFPIPE } PROTO_FLAG; 83typedef enum { FLAG_TCP, FLAG_UDP, FLAG_MCA, FLAG_SELFPIPE } PROTO_FLAG;
66 84
67typedef struct { 85#define OT_PEER_COMPARE_SIZE6 ((OT_IP_SIZE6) + (OT_PORT_SIZE))
68 uint8_t data[OT_IP_SIZE+2+2]; 86#define OT_PEER_COMPARE_SIZE4 ((OT_IP_SIZE4) + (OT_PORT_SIZE))
69} ot_peer; 87#define OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(PEER_SIZE) ((PEER_SIZE) - (OT_TIME_SIZE) - (OT_FLAG_SIZE))
88
89#define OT_PEER_SIZE6 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE6))
90#define OT_PEER_SIZE4 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE4))
91
92typedef uint8_t ot_peer; /* Generic pointer to a v6 or v4 peer */
93typedef uint8_t ot_peer6[OT_PEER_SIZE6];
94typedef uint8_t ot_peer4[OT_PEER_SIZE4];
70static const uint8_t PEER_FLAG_SEEDING = 0x80; 95static const uint8_t PEER_FLAG_SEEDING = 0x80;
71static const uint8_t PEER_FLAG_COMPLETED = 0x40; 96static const uint8_t PEER_FLAG_COMPLETED = 0x40;
72static const uint8_t PEER_FLAG_STOPPED = 0x20; 97static const uint8_t PEER_FLAG_STOPPED = 0x20;
73static const uint8_t PEER_FLAG_FROM_SYNC = 0x10; 98static const uint8_t PEER_FLAG_FROM_SYNC = 0x10;
74static const uint8_t PEER_FLAG_LEECHING = 0x00; 99static const uint8_t PEER_FLAG_LEECHING = 0x00;
75 100
76#ifdef WANT_V6 101/* Takes an ot_peer6 and returns the proper pointer to the peer and sets peer_size */
77#define OT_SETIP(peer,ip) memcpy((peer),(ip),(OT_IP_SIZE)) 102ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size);
78#else 103size_t peer_size_from_peer6(ot_peer6 *peer);
79#define OT_SETIP(peer,ip) memcpy((peer),(((uint8_t*)ip)+12),(OT_IP_SIZE)) 104
80#endif 105/* New style */
81#define OT_SETPORT(peer,port) memcpy(((uint8_t*)(peer))+(OT_IP_SIZE),(port),2) 106#define OT_SETIP(peer, ip) memcpy((uint8_t *)(peer), (ip), OT_IP_SIZE6)
82#define OT_PEERFLAG(peer) (((uint8_t*)(peer))[(OT_IP_SIZE)+2]) 107#define OT_SETPORT(peer, port) memcpy(((uint8_t *)(peer)) + (OT_IP_SIZE6), (port), 2)
83#define OT_PEERTIME(peer) (((uint8_t*)(peer))[(OT_IP_SIZE)+3]) 108#define OT_PEERFLAG(peer) (((uint8_t *)(peer))[(OT_IP_SIZE6) + 2])
109#define OT_PEERFLAG_D(peer, peersize) (((uint8_t *)(peer))[(peersize) - 2])
110#define OT_PEERTIME(peer, peersize) (((uint8_t *)(peer))[(peersize) - 1])
111
112#define PEERS_BENCODED6 "6:peers6"
113#define PEERS_BENCODED4 "5:peers"
84 114
85#define OT_HASH_COMPARE_SIZE (sizeof(ot_hash)) 115#define OT_HASH_COMPARE_SIZE (sizeof(ot_hash))
86#define OT_PEER_COMPARE_SIZE ((OT_IP_SIZE)+2)
87 116
88struct ot_peerlist; 117struct ot_peerlist;
89typedef struct ot_peerlist ot_peerlist; 118typedef struct ot_peerlist ot_peerlist;
90typedef struct { 119typedef struct {
91 ot_hash hash; 120 ot_hash hash;
92 ot_peerlist *peer_list; 121 ot_peerlist *peer_list6;
122 ot_peerlist *peer_list4;
93} ot_torrent; 123} ot_torrent;
94 124
95#include "ot_vector.h" 125#include "ot_vector.h"
96 126
97struct ot_peerlist { 127struct ot_peerlist {
98 ot_time base; 128 ot_time base;
99 size_t seed_count; 129 size_t seed_count;
100 size_t peer_count; 130 size_t peer_count;
101 size_t down_count; 131 size_t down_count;
102/* normal peers vector or 132 /* normal peers vector or
103 pointer to ot_vector[32] buckets if data != NULL and space == 0 133 pointer to ot_vector[32] buckets if data != NULL and space == 0
104*/ 134 */
105 ot_vector peers; 135 ot_vector peers;
106}; 136};
107#define OT_PEERLIST_HASBUCKETS(peer_list) ((peer_list)->peers.size > (peer_list)->peers.space) 137#define OT_PEERLIST_HASBUCKETS(peer_list) ((peer_list)->peers.size > (peer_list)->peers.space)
108 138
109struct ot_workstruct { 139struct ot_workstruct {
110 /* Thread specific, static */ 140 /* Thread specific, static */
111 char *inbuf; 141 char *inbuf;
112#define G_INBUF_SIZE 8192 142#define G_INBUF_SIZE 8192
113 char *outbuf; 143 char *outbuf;
114#define G_OUTBUF_SIZE 8192 144#define G_OUTBUF_SIZE 8192
115#ifdef _DEBUG_HTTPERROR 145#ifdef _DEBUG_HTTPERROR
116 char *debugbuf; 146 char *debugbuf;
117#define G_DEBUGBUF_SIZE 8192 147#define G_DEBUGBUF_SIZE 8192
118#endif 148#endif
119 149
120 /* The peer currently in the working */ 150 /* The peer currently in the working */
121 ot_peer peer; 151 ot_peer6 peer; /* Can fit v6 and v4 peers */
122 152
123 /* Pointers into the request buffer */ 153 /* Pointers into the request buffer */
124 ot_hash *hash; 154 ot_hash *hash;
125 char *peer_id; 155 char *peer_id;
126 156
127 /* HTTP specific, non static */ 157 /* HTTP specific, non static */
128 int keep_alive;
129 char *request; 158 char *request;
130 ssize_t request_size; 159 ssize_t request_size;
131 ssize_t header_size; 160 ssize_t header_size;
132 char *reply; 161 char *reply;
133 ssize_t reply_size; 162 ssize_t reply_size;
163
164 /* Entropy state for rand48 function so that threads don't need to acquire mutexes for
165 global random() or arc4random() state, which causes heavy load on linuxes */
166 uint16_t rand48_state[3];
167
168 int keep_alive;
134}; 169};
135 170
136/* 171/*
@@ -142,31 +177,34 @@ struct ot_workstruct {
142#endif 177#endif
143 178
144#ifdef WANT_SYNC 179#ifdef WANT_SYNC
145#define WANT_SYNC_PARAM( param ) , param 180#define WANT_SYNC_PARAM(param) , param
146#else 181#else
147#define WANT_SYNC_PARAM( param ) 182#define WANT_SYNC_PARAM(param)
148#endif 183#endif
149 184
150#ifdef WANT_LOG_NETWORKS 185#ifdef WANT_LOG_NETWORKS
151#error Live logging networks disabled at the moment. 186#error Live logging networks disabled at the moment.
152#endif 187#endif
153 188
154void trackerlogic_init( ); 189void trackerlogic_init(void);
155void trackerlogic_deinit( void ); 190void trackerlogic_deinit(void);
156void exerr( char * message ); 191void exerr(char *message);
157 192
158/* add_peer_to_torrent does only release the torrent bucket if from_sync is set, 193/* add_peer_to_torrent does only release the torrent bucket if from_sync is set,
159 otherwise it is released in return_peers_for_torrent */ 194 otherwise it is released in return_peers_for_torrent */
160size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount ); 195size_t add_peer_to_torrent_and_return_peers(PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount);
161size_t remove_peer_from_torrent( PROTO_FLAG proto, struct ot_workstruct *ws ); 196size_t remove_peer_from_torrent(PROTO_FLAG proto, struct ot_workstruct *ws);
162size_t return_tcp_scrape_for_torrent( ot_hash *hash, int amount, char *reply ); 197size_t return_tcp_scrape_for_torrent(ot_hash const *hash_list, int amount, char *reply);
163size_t return_udp_scrape_for_torrent( ot_hash hash, char *reply ); 198size_t return_udp_scrape_for_torrent(ot_hash const hash, char *reply);
164void add_torrent_from_saved_state( ot_hash hash, ot_time base, size_t down_count ); 199void add_torrent_from_saved_state(ot_hash const hash, ot_time base, size_t down_count);
200#ifdef _DEBUG_RANDOMTORRENTS
201void trackerlogic_add_random_torrents(size_t amount);
202#endif
165 203
166/* torrent iterator */ 204/* torrent iterator */
167void iterate_all_torrents( int (*for_each)( ot_torrent* torrent, uintptr_t data ), uintptr_t data ); 205void iterate_all_torrents(int (*for_each)(ot_torrent *torrent, uintptr_t data), uintptr_t data);
168 206
169/* Helper, before it moves to its own object */ 207/* Helper, before it moves to its own object */
170void free_peerlist( ot_peerlist *peer_list ); 208void free_peerlist(ot_peerlist *peer_list);
171 209
172#endif 210#endif