diff options
-rw-r--r-- | .clang-format | 246 | ||||
-rw-r--r-- | .gitignore | 2 | ||||
-rw-r--r-- | Makefile | 26 | ||||
-rw-r--r-- | man1/opentracker.1 | 142 | ||||
-rw-r--r-- | man4/opentracker.conf.4 | 86 | ||||
-rw-r--r-- | opentracker.c | 890 | ||||
-rw-r--r-- | opentracker.conf.sample | 52 | ||||
-rw-r--r-- | ot_accesslist.c | 506 | ||||
-rw-r--r-- | ot_accesslist.h | 52 | ||||
-rw-r--r-- | ot_clean.c | 137 | ||||
-rw-r--r-- | ot_clean.h | 10 | ||||
-rw-r--r-- | ot_fullscrape.c | 470 | ||||
-rw-r--r-- | ot_fullscrape.h | 8 | ||||
-rw-r--r-- | ot_http.c | 836 | ||||
-rw-r--r-- | ot_http.h | 15 | ||||
-rw-r--r-- | ot_iovec.c | 76 | ||||
-rw-r--r-- | ot_iovec.h | 11 | ||||
-rw-r--r-- | ot_livesync.c | 205 | ||||
-rw-r--r-- | ot_livesync.h | 16 | ||||
-rw-r--r-- | ot_mutex.c | 308 | ||||
-rw-r--r-- | ot_mutex.h | 113 | ||||
-rw-r--r-- | ot_rijndael.c | 2 | ||||
-rw-r--r-- | ot_stats.c | 992 | ||||
-rw-r--r-- | ot_stats.h | 24 | ||||
-rw-r--r-- | ot_sync.c | 118 | ||||
-rw-r--r-- | ot_sync.h | 8 | ||||
-rw-r--r-- | ot_udp.c | 265 | ||||
-rw-r--r-- | ot_udp.h | 4 | ||||
-rw-r--r-- | ot_vector.c | 242 | ||||
-rw-r--r-- | ot_vector.h | 24 | ||||
-rw-r--r-- | proxy.c | 852 | ||||
-rw-r--r-- | scan_urlencoded_query.c | 99 | ||||
-rw-r--r-- | scan_urlencoded_query.h | 6 | ||||
-rw-r--r-- | tests/testsuite2.sh | 24 | ||||
-rw-r--r-- | trackerlogic.c | 635 | ||||
-rw-r--r-- | trackerlogic.h | 168 |
36 files changed, 4580 insertions, 3090 deletions
diff --git a/.clang-format b/.clang-format new file mode 100644 index 0000000..cf3c715 --- /dev/null +++ b/.clang-format | |||
@@ -0,0 +1,246 @@ | |||
1 | --- | ||
2 | Language: Cpp | ||
3 | # BasedOnStyle: LLVM | ||
4 | AccessModifierOffset: -2 | ||
5 | AlignAfterOpenBracket: Align | ||
6 | AlignArrayOfStructures: None | ||
7 | AlignConsecutiveAssignments: | ||
8 | Enabled: true | ||
9 | AcrossEmptyLines: true | ||
10 | AcrossComments: true | ||
11 | AlignCompound: true | ||
12 | AlignFunctionPointers: false | ||
13 | PadOperators: true | ||
14 | AlignConsecutiveBitFields: | ||
15 | Enabled: false | ||
16 | AcrossEmptyLines: true | ||
17 | AcrossComments: true | ||
18 | AlignCompound: false | ||
19 | AlignFunctionPointers: false | ||
20 | PadOperators: false | ||
21 | AlignConsecutiveDeclarations: | ||
22 | Enabled: true | ||
23 | AcrossEmptyLines: true | ||
24 | AcrossComments: true | ||
25 | AlignCompound: true | ||
26 | AlignFunctionPointers: false | ||
27 | PadOperators: true | ||
28 | AlignConsecutiveMacros: | ||
29 | Enabled: true | ||
30 | AcrossEmptyLines: true | ||
31 | AcrossComments: true | ||
32 | AlignCompound: true | ||
33 | AlignFunctionPointers: false | ||
34 | PadOperators: false | ||
35 | AlignConsecutiveShortCaseStatements: | ||
36 | Enabled: true | ||
37 | AcrossEmptyLines: true | ||
38 | AcrossComments: true | ||
39 | AlignCaseColons: false | ||
40 | AlignEscapedNewlines: Right | ||
41 | AlignOperands: Align | ||
42 | AlignTrailingComments: | ||
43 | Kind: Always | ||
44 | OverEmptyLines: 0 | ||
45 | AllowAllArgumentsOnNextLine: true | ||
46 | AllowAllParametersOfDeclarationOnNextLine: true | ||
47 | AllowBreakBeforeNoexceptSpecifier: Never | ||
48 | AllowShortBlocksOnASingleLine: Never | ||
49 | AllowShortCaseLabelsOnASingleLine: false | ||
50 | AllowShortCompoundRequirementOnASingleLine: true | ||
51 | AllowShortEnumsOnASingleLine: true | ||
52 | AllowShortFunctionsOnASingleLine: All | ||
53 | AllowShortIfStatementsOnASingleLine: Never | ||
54 | AllowShortLambdasOnASingleLine: All | ||
55 | AllowShortLoopsOnASingleLine: false | ||
56 | AlwaysBreakAfterDefinitionReturnType: None | ||
57 | AlwaysBreakAfterReturnType: None | ||
58 | AlwaysBreakBeforeMultilineStrings: false | ||
59 | AlwaysBreakTemplateDeclarations: MultiLine | ||
60 | AttributeMacros: | ||
61 | - __capability | ||
62 | BinPackArguments: true | ||
63 | BinPackParameters: true | ||
64 | BitFieldColonSpacing: Both | ||
65 | BraceWrapping: | ||
66 | AfterCaseLabel: false | ||
67 | AfterClass: false | ||
68 | AfterControlStatement: Never | ||
69 | AfterEnum: false | ||
70 | AfterExternBlock: false | ||
71 | AfterFunction: false | ||
72 | AfterNamespace: false | ||
73 | AfterObjCDeclaration: false | ||
74 | AfterStruct: false | ||
75 | AfterUnion: false | ||
76 | BeforeCatch: false | ||
77 | BeforeElse: false | ||
78 | BeforeLambdaBody: false | ||
79 | BeforeWhile: false | ||
80 | IndentBraces: false | ||
81 | SplitEmptyFunction: true | ||
82 | SplitEmptyRecord: true | ||
83 | SplitEmptyNamespace: true | ||
84 | BreakAdjacentStringLiterals: true | ||
85 | BreakAfterAttributes: Leave | ||
86 | BreakAfterJavaFieldAnnotations: false | ||
87 | BreakArrays: true | ||
88 | BreakBeforeBinaryOperators: None | ||
89 | BreakBeforeConceptDeclarations: Always | ||
90 | BreakBeforeBraces: Attach | ||
91 | BreakBeforeInlineASMColon: OnlyMultiline | ||
92 | BreakBeforeTernaryOperators: true | ||
93 | BreakConstructorInitializers: BeforeColon | ||
94 | BreakInheritanceList: BeforeColon | ||
95 | BreakStringLiterals: false | ||
96 | ColumnLimit: 160 | ||
97 | CommentPragmas: '^ IWYU pragma:' | ||
98 | CompactNamespaces: false | ||
99 | ConstructorInitializerIndentWidth: 4 | ||
100 | ContinuationIndentWidth: 4 | ||
101 | Cpp11BracedListStyle: true | ||
102 | DerivePointerAlignment: false | ||
103 | DisableFormat: false | ||
104 | EmptyLineAfterAccessModifier: Never | ||
105 | EmptyLineBeforeAccessModifier: LogicalBlock | ||
106 | ExperimentalAutoDetectBinPacking: false | ||
107 | FixNamespaceComments: true | ||
108 | ForEachMacros: | ||
109 | - foreach | ||
110 | - Q_FOREACH | ||
111 | - BOOST_FOREACH | ||
112 | IfMacros: | ||
113 | - KJ_IF_MAYBE | ||
114 | IncludeBlocks: Preserve | ||
115 | IncludeCategories: | ||
116 | - Regex: '^"(llvm|llvm-c|clang|clang-c)/' | ||
117 | Priority: 2 | ||
118 | SortPriority: 0 | ||
119 | CaseSensitive: false | ||
120 | - Regex: '^(<|"(gtest|gmock|isl|json)/)' | ||
121 | Priority: 3 | ||
122 | SortPriority: 0 | ||
123 | CaseSensitive: false | ||
124 | - Regex: '.*' | ||
125 | Priority: 1 | ||
126 | SortPriority: 0 | ||
127 | CaseSensitive: false | ||
128 | IncludeIsMainRegex: '(Test)?$' | ||
129 | IncludeIsMainSourceRegex: '' | ||
130 | IndentAccessModifiers: false | ||
131 | IndentCaseBlocks: false | ||
132 | IndentCaseLabels: false | ||
133 | IndentExternBlock: AfterExternBlock | ||
134 | IndentGotoLabels: true | ||
135 | IndentPPDirectives: None | ||
136 | IndentRequiresClause: true | ||
137 | IndentWidth: 2 | ||
138 | IndentWrappedFunctionNames: false | ||
139 | InsertBraces: false | ||
140 | InsertNewlineAtEOF: false | ||
141 | InsertTrailingCommas: None | ||
142 | IntegerLiteralSeparator: | ||
143 | Binary: 0 | ||
144 | BinaryMinDigits: 0 | ||
145 | Decimal: 0 | ||
146 | DecimalMinDigits: 0 | ||
147 | Hex: 0 | ||
148 | HexMinDigits: 0 | ||
149 | JavaScriptQuotes: Leave | ||
150 | JavaScriptWrapImports: true | ||
151 | KeepEmptyLinesAtTheStartOfBlocks: true | ||
152 | KeepEmptyLinesAtEOF: false | ||
153 | LambdaBodyIndentation: Signature | ||
154 | LineEnding: DeriveLF | ||
155 | MacroBlockBegin: '' | ||
156 | MacroBlockEnd: '' | ||
157 | MaxEmptyLinesToKeep: 1 | ||
158 | NamespaceIndentation: None | ||
159 | ObjCBinPackProtocolList: Auto | ||
160 | ObjCBlockIndentWidth: 2 | ||
161 | ObjCBreakBeforeNestedBlockParam: true | ||
162 | ObjCSpaceAfterProperty: false | ||
163 | ObjCSpaceBeforeProtocolList: true | ||
164 | PackConstructorInitializers: BinPack | ||
165 | PenaltyBreakAssignment: 2 | ||
166 | PenaltyBreakBeforeFirstCallParameter: 19 | ||
167 | PenaltyBreakComment: 300 | ||
168 | PenaltyBreakFirstLessLess: 120 | ||
169 | PenaltyBreakOpenParenthesis: 0 | ||
170 | PenaltyBreakScopeResolution: 500 | ||
171 | PenaltyBreakString: 1000 | ||
172 | PenaltyBreakTemplateDeclaration: 10 | ||
173 | PenaltyExcessCharacter: 1000000 | ||
174 | PenaltyIndentedWhitespace: 0 | ||
175 | PenaltyReturnTypeOnItsOwnLine: 60 | ||
176 | PointerAlignment: Right | ||
177 | PPIndentWidth: -1 | ||
178 | QualifierAlignment: Leave | ||
179 | ReferenceAlignment: Pointer | ||
180 | ReflowComments: true | ||
181 | RemoveBracesLLVM: false | ||
182 | RemoveParentheses: Leave | ||
183 | RemoveSemicolon: false | ||
184 | RequiresClausePosition: OwnLine | ||
185 | RequiresExpressionIndentation: OuterScope | ||
186 | SeparateDefinitionBlocks: Leave | ||
187 | ShortNamespaceLines: 1 | ||
188 | SkipMacroDefinitionBody: false | ||
189 | SortIncludes: CaseSensitive | ||
190 | SortJavaStaticImport: Before | ||
191 | SortUsingDeclarations: LexicographicNumeric | ||
192 | SpaceAfterCStyleCast: false | ||
193 | SpaceAfterLogicalNot: false | ||
194 | SpaceAfterTemplateKeyword: true | ||
195 | SpaceAroundPointerQualifiers: Default | ||
196 | SpaceBeforeAssignmentOperators: true | ||
197 | SpaceBeforeCaseColon: false | ||
198 | SpaceBeforeCpp11BracedList: false | ||
199 | SpaceBeforeCtorInitializerColon: true | ||
200 | SpaceBeforeInheritanceColon: true | ||
201 | SpaceBeforeJsonColon: false | ||
202 | SpaceBeforeParens: ControlStatements | ||
203 | SpaceBeforeParensOptions: | ||
204 | AfterControlStatements: true | ||
205 | AfterForeachMacros: true | ||
206 | AfterFunctionDefinitionName: false | ||
207 | AfterFunctionDeclarationName: false | ||
208 | AfterIfMacros: true | ||
209 | AfterOverloadedOperator: false | ||
210 | AfterPlacementOperator: true | ||
211 | AfterRequiresInClause: false | ||
212 | AfterRequiresInExpression: false | ||
213 | BeforeNonEmptyParentheses: false | ||
214 | SpaceBeforeRangeBasedForLoopColon: true | ||
215 | SpaceBeforeSquareBrackets: false | ||
216 | SpaceInEmptyBlock: false | ||
217 | SpacesBeforeTrailingComments: 1 | ||
218 | SpacesInAngles: Never | ||
219 | SpacesInContainerLiterals: true | ||
220 | SpacesInLineCommentPrefix: | ||
221 | Minimum: 1 | ||
222 | Maximum: -1 | ||
223 | SpacesInParens: Never | ||
224 | SpacesInParensOptions: | ||
225 | InCStyleCasts: false | ||
226 | InConditionalStatements: false | ||
227 | InEmptyParentheses: false | ||
228 | Other: false | ||
229 | SpacesInSquareBrackets: false | ||
230 | Standard: Latest | ||
231 | StatementAttributeLikeMacros: | ||
232 | - Q_EMIT | ||
233 | StatementMacros: | ||
234 | - Q_UNUSED | ||
235 | - QT_REQUIRE_VERSION | ||
236 | TabWidth: 8 | ||
237 | UseTab: Never | ||
238 | VerilogBreakBetweenInstancePorts: true | ||
239 | WhitespaceSensitiveMacros: | ||
240 | - BOOST_PP_STRINGIZE | ||
241 | - CF_SWIFT_NAME | ||
242 | - NS_SWIFT_NAME | ||
243 | - PP_STRINGIZE | ||
244 | - STRINGIZE | ||
245 | ... | ||
246 | |||
diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..874c63c --- /dev/null +++ b/.gitignore | |||
@@ -0,0 +1,2 @@ | |||
1 | *.o | ||
2 | |||
@@ -1,7 +1,5 @@ | |||
1 | # $Id$ | 1 | # $Id$ |
2 | 2 | ||
3 | CC?=gcc | ||
4 | |||
5 | # Linux flavour | 3 | # Linux flavour |
6 | # PREFIX?=/opt/diet | 4 | # PREFIX?=/opt/diet |
7 | # LIBOWFAT_HEADERS=$(PREFIX)/include | 5 | # LIBOWFAT_HEADERS=$(PREFIX)/include |
@@ -20,15 +18,20 @@ LIBOWFAT_LIBRARY=$(PREFIX)/libowfat | |||
20 | BINDIR?=$(PREFIX)/bin | 18 | BINDIR?=$(PREFIX)/bin |
21 | STRIP?=strip | 19 | STRIP?=strip |
22 | 20 | ||
23 | #FEATURES+=-DWANT_V6 | 21 | #FEATURES+=-DWANT_V4_ONLY
24 | |||
25 | #FEATURES+=-DWANT_ACCESSLIST_BLACK | 22 | #FEATURES+=-DWANT_ACCESSLIST_BLACK |
26 | #FEATURES+=-DWANT_ACCESSLIST_WHITE | 23 | #FEATURES+=-DWANT_ACCESSLIST_WHITE |
24 | #FEATURES+=-DWANT_DYNAMIC_ACCESSLIST | ||
27 | 25 | ||
28 | #FEATURES+=-DWANT_SYNC_LIVE | 26 | #FEATURES+=-DWANT_SYNC_LIVE |
29 | #FEATURES+=-DWANT_IP_FROM_QUERY_STRING | 27 | #FEATURES+=-DWANT_IP_FROM_QUERY_STRING |
30 | #FEATURES+=-DWANT_COMPRESSION_GZIP | 28 | FEATURES+=-DWANT_COMPRESSION_GZIP |
31 | #FEATURES+=-DWANT_COMPRESSION_GZIP_ALWAYS | 29 | FEATURES+=-DWANT_COMPRESSION_GZIP_ALWAYS |
30 | |||
31 | #FEATURES+=-DWANT_COMPRESSION_ZSTD | ||
32 | #FEATURES+=-DWANT_COMPRESSION_ZSTD_ALWAYS | ||
33 | #LDFLAGS+=-lzstd | ||
34 | |||
32 | #FEATURES+=-DWANT_LOG_NETWORKS | 35 | #FEATURES+=-DWANT_LOG_NETWORKS |
33 | #FEATURES+=-DWANT_RESTRICT_STATS | 36 | #FEATURES+=-DWANT_RESTRICT_STATS |
34 | #FEATURES+=-DWANT_IP_FROM_PROXY | 37 | #FEATURES+=-DWANT_IP_FROM_PROXY |
@@ -40,17 +43,24 @@ STRIP?=strip | |||
40 | #FEATURES+=-DWANT_DEV_RANDOM | 43 | #FEATURES+=-DWANT_DEV_RANDOM |
41 | FEATURES+=-DWANT_FULLSCRAPE | 44 | FEATURES+=-DWANT_FULLSCRAPE |
42 | 45 | ||
46 | # You need libowfat version 0.34 to allow for automatic release of chunks during | ||
47 | # full scrape transfer, if you rely on an older version, enable this flag | ||
48 | #FEATURES+=-DWANT_NO_AUTO_FREE | ||
49 | |||
43 | # Is enabled on BSD systems by default in trackerlogic.h | 50 | # Is enabled on BSD systems by default in trackerlogic.h |
44 | # on Linux systems you will need -lbsd | 51 | # on Linux systems you will need -lbsd
45 | #FEATURES+=-DWANT_ARC4RANDOM | 52 | #FEATURES+=-DWANT_ARC4RANDOM |
46 | 53 | ||
47 | #FEATURES+=-D_DEBUG_HTTPERROR | 54 | #FEATURES+=-D_DEBUG_HTTPERROR |
55 | #FEATURES+=-D_DEBUG_RANDOMTORRENTS | ||
56 | |||
57 | GIT_VERSION=$(shell sh -c 'command -v git >/dev/null && test -d .git && git rev-parse HEAD || echo _git_or_commit_not_found_') | ||
48 | 58 | ||
49 | OPTS_debug=-D_DEBUG -g -ggdb # -pg -fprofile-arcs -ftest-coverage | 59 | OPTS_debug=-D_DEBUG -g -ggdb # -pg -fprofile-arcs -ftest-coverage |
50 | OPTS_production=-O3 | 60 | OPTS_production=-O3 |
51 | 61 | ||
52 | CFLAGS+=-I$(LIBOWFAT_HEADERS) -Wall -pipe -Wextra #-ansi -pedantic | 62 | CFLAGS+=-I$(LIBOWFAT_HEADERS) -DGIT_VERSION=$(GIT_VERSION) -Wall -pipe -pthread -Wextra #-ansi -pedantic |
53 | LDFLAGS+=-L$(LIBOWFAT_LIBRARY) -lowfat -pthread -lpthread -lz | 63 | LDFLAGS+=-L$(LIBOWFAT_LIBRARY) -lowfat -pthread -lz |
54 | #LDFLAGS+=-lbsd | 64 | #LDFLAGS+=-lbsd |
55 | 65 | ||
56 | BINARY =opentracker | 66 | BINARY =opentracker |
diff --git a/man1/opentracker.1 b/man1/opentracker.1 new file mode 100644 index 0000000..85ded7b --- /dev/null +++ b/man1/opentracker.1 | |||
@@ -0,0 +1,142 @@ | |||
1 | .Dd April 15, 2024 | ||
2 | .Dt opentracker 1 | ||
3 | .Os Unix | ||
4 | .Sh NAME | ||
5 | .Nm opentracker | ||
6 | .Nd a free and open bittorrent tracker | ||
7 | .Sh SYNOPSIS | ||
8 | .Nm | ||
9 | .Op Fl f Ar config | ||
10 | .Op Fl i Ar ip-select | ||
11 | .Op Fl p Ar port-bind-tcp | ||
12 | .Op Fl P Ar port-bind-udp | ||
13 | .Op Fl A Ar blessed-ip | ||
14 | .Op Fl r Ar redirect-url | ||
15 | .Op Fl d Ar chdir | ||
16 | .Op Fl u Ar user | ||
17 | .Op Fl w Ar accesslist | Fl b Ar accesslist | ||
18 | .Sh DESCRIPTION | ||
19 | .Nm | ||
20 | is a bittorrent tracker that implements announce and scrape actions over the | ||
21 | UDP and the plain http protocol, aiming for minimal resource usage. | ||
22 | .Pp | ||
23 | |||
24 | When invoked with parameters, it binds to TCP and UDP port 6969 on all | ||
25 | interfaces. The recommended way to configure opentracker is by providing a | ||
26 | config file using the | ||
27 | .Op Fl f Ar config | ||
28 | option. See | ||
29 | .Xr opentracker.conf 4 | ||
30 | for details. | ||
31 | .Pp | ||
32 | |||
33 | .Sh OPTIONS | ||
34 | The following options are available: | ||
35 | |||
36 | .Bl -tag -width -indent=8 | ||
37 | .It Fl f Ar config | ||
38 | Parse a config file with a list of options. Consecutive command options | ||
39 | will override options from the config file. See | ||
40 | .Xr opentracker.conf 4 | ||
41 | for details. | ||
42 | |||
43 | .It Fl i Ar ip-select | ||
44 | Select an ip address that will be used with the next | ||
45 | .Op Fl p | ||
46 | or | ||
47 | .Op Fl P | ||
48 | command to actually bind to this address. Setting this option without any bind | ||
49 | options in the config file or | ||
50 | .Op Fl p | ||
51 | or | ||
52 | .Op Fl P | ||
53 | commands will limit opentracker to only bind to this address. | ||
54 | .It Fl p Ar port-bind-tcp | ||
55 | Bind to the TCP port on the last preceding ip address set with the | ||
56 | .Op Fl i ip-select | ||
57 | option or to all available addresses if none has been set. Can be given multiple | ||
58 | times. | ||
59 | .It Fl P Ar port-bind-udp | ||
60 | Bind to the UDP port on the last preceding ip address set with the | ||
61 | .Op Fl i ip-select | ||
62 | option or to all available addresses if none has been set. Can be given multiple | ||
63 | times. | ||
64 | .It Fl A Ar blessed-ip | ||
65 | Set an ip address in IPv4 or IPv6 or a net in CIDR notation to bless the network | ||
66 | for access to restricted resources. | ||
67 | .It Fl r Ar redirect-url | ||
68 | Set the URL that | ||
69 | .Nm | ||
70 | will redirect users to when the / address is requested via HTTP. | ||
71 | .It Fl d Ar chdir | ||
72 | Sets the directory | ||
73 | .Nm | ||
74 | will | ||
75 | .Xr chroot 2 | ||
76 | to if ran as root or | ||
77 | .Xr chdir 2 | ||
78 | to if ran as unprivileged user. Note that any accesslist files need to be | ||
79 | relative to and within that directory. | ||
80 | .It Fl u Ar user | ||
81 | User to run | ||
82 | .Nm | ||
83 | under after all operations that need privileges have finished. | ||
84 | .It Fl w Ar accesslist | Fl b Ar accesslist | ||
85 | If | ||
86 | .Nm | ||
87 | has been compiled with the | ||
88 | .B WANT_ACCESSLIST_BLACK | ||
89 | or | ||
90 | .B WANT_ACCESSLIST_WHITE | ||
91 | options, this option sets the location of the accesslist. | ||
92 | .El | ||
93 | |||
94 | .Sh EXAMPLES | ||
95 | |||
96 | Start | ||
97 | .Nm | ||
98 | bound on UDP and TCP ports 6969 on IPv6 localhost. | ||
99 | |||
100 | .Dl # ./opentracker -i ::1 -p 6969 -P 6969 | ||
101 | |||
102 | .Pp | ||
103 | Start | ||
104 | .Nm | ||
105 | bound on TCP port 6868 and UDP port 6969 on IPv4 localhost and allow | ||
106 | privileged access from the network 192.168/16 while redirecting | ||
107 | HTTP clients accessing the root directory, which is not covered by the | ||
108 | bittorrent tracker protocol, to https://my-trackersite.com/. | ||
109 | |||
110 | .Dl # ./opentracker -i 192.168.0.4 -p 6868 -P 6969 -A 192.168/16 -r https://my-trackersite.com/ | ||
111 | |||
112 | The announce URLs are http://192.168.0.4:6868/announce and | ||
113 | udp://192.168.0.4:6969/announce respectively. | ||
114 | |||
115 | .Sh FILES | ||
116 | .Bl -tag -width indent | ||
117 | .It Pa opentracker.conf | ||
118 | The | ||
119 | .Nm | ||
120 | config file. | ||
121 | .El | ||
122 | .Sh SEE ALSO | ||
123 | .Xr opentracker.conf 4 | ||
124 | .Pp | ||
125 | opentracker documentation | ||
126 | .Lk https://erdgeist.org/arts/software/opentracker | ||
127 | .Pp | ||
128 | Bittorrent tracker protocol | ||
129 | .Lk http://www.bittorrent.org/beps/bep_0015.html | ||
130 | ||
131 | .Sh AUTHOR | ||
132 | .An Dirk Engling | ||
133 | .Aq Mt erdgeist@erdgeist.org . | ||
134 | .Sh LICENSE | ||
135 | This software is released under the Beerware License: | ||
136 | |||
137 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software | ||
138 | and associated documentation files (the "Software"), to deal in the Software with the following | ||
139 | terms and conditions: | ||
140 | |||
141 | If you meet the author(s) someday, and you think this software is worth it, you can buy them | ||
142 | a beer in return. | ||
diff --git a/man4/opentracker.conf.4 b/man4/opentracker.conf.4 new file mode 100644 index 0000000..b4f5f51 --- /dev/null +++ b/man4/opentracker.conf.4 | |||
@@ -0,0 +1,86 @@ | |||
1 | .Dd 2024-04-18 | ||
2 | .Dt opentracker.conf 4 | ||
3 | .Os Unix | ||
4 | .Sh NAME | ||
5 | .Nm opentracker.conf | ||
6 | .Nd configuration file for opentracker | ||
7 | .Sh SYNOPSIS | ||
8 | .Nm | ||
9 | .Sh DESCRIPTION | ||
10 | The | ||
11 | .Nm | ||
12 | configuration file specifies various options for configuring the behavior of the opentracker program. | ||
13 | .Pp | ||
14 | Lines starting with '#' are comments and are ignored. Options are specified as 'keyword value' pairs. | ||
15 | .Pp | ||
16 | The following options are available: | ||
17 | |||
18 | .Bl -tag -width ".It access.proxy" -compact | ||
19 | .It listen.tcp_udp Ar address | ||
20 | Specifies an address opentracker will listen on for both TCP and UDP connections. If none are specified, opentracker listens on 0.0.0.0:6969 by default. Can be added more than once. | ||
21 | |||
22 | .It listen.tcp Ar address | ||
23 | Specifies the address opentracker will listen on for TCP connections. Can be added more than once. | ||
24 | |||
25 | .It listen.udp Ar address | ||
26 | Specifies the address opentracker will listen on for UDP connections. Can be added more than once. | ||
27 | |||
28 | .It listen.udp.workers Ar threads | ||
29 | Specifies how many threads will be spawned to handle UDP connections. Defaults to 4. | ||
30 | |||
31 | .It access.whitelist Ar path/to/whitelist | ||
32 | Specifies the path to the whitelist file containing all torrent hashes that opentracker will serve. Use this option if opentracker runs in a non-open mode. | ||
33 | |||
34 | .It access.blacklist Ar path/to/blacklist | ||
35 | Specifies the path to the blacklist file containing all torrent hashes that opentracker will not serve. Use this option if opentracker was compiled to allow blacklisting. | ||
36 | |||
37 | .It access.fifo_add Ar path/to/adder.fifo | ||
38 | Specifies the path to the FIFO (named pipe) used for dynamic changesets to accesslists. Info hashes written to this FIFO will be added to the main accesslist file. | ||
39 | |||
40 | .It access.fifo_delete Ar path/to/deleter.fifo | ||
41 | Specifies the path to the FIFO (named pipe) used for dynamic changesets to accesslists. Info hashes written to this FIFO will be removed from the main accesslist file. | ||
42 | |||
43 | .It access.stats Ar ip_address_or_network | ||
44 | Specifies the IP address or network in CIDR notation allowed to fetch stats from opentracker. | ||
45 | |||
46 | .It access.stats_path Ar path | ||
47 | Specifies the path to the stats location. You can configure opentracker to appear anywhere on your tracker. Defaults to /stats. | ||
48 | |||
49 | .It access.proxy Ar ip_address_or_network | ||
50 | Specifies the IP address or network of the reverse proxies. Opentracker will take the X-Forwarded-For address instead of the source IP address. Can be added more than once. | ||
51 | |||
52 | .It livesync.cluster.listen Ar ip_address:port | ||
53 | Specifies the IP address and port opentracker will listen on for incoming live sync packets to keep a cluster of opentrackers synchronized. | ||
54 | |||
55 | .It livesync.cluster.node_ip Ar ip_address | ||
56 | Specifies one trusted IP address for sync between trackers running in a cluster. Can be added more than once. | ||
57 | |||
58 | .It batchsync.cluster.admin_ip Ar ip_address | ||
59 | Specifies the admin IP address for old-style (HTTP-based) asynchronous tracker syncing. | ||
60 | |||
61 | .It tracker.rootdir Ar path | ||
62 | Specifies the directory opentracker will chroot/chdir to. All black/white list files must be located in this directory. | ||
63 | |||
64 | .It tracker.user Ar username | ||
65 | Specifies the user opentracker will setuid to after binding to potentially privileged ports. | ||
66 | |||
67 | .It tracker.redirect_url Ar URL | ||
68 | Specifies the URL opentracker will redirect to in response to a "GET / HTTP" request. | ||
69 | |||
70 | .Sh EXAMPLES | ||
71 | To specify the address opentracker will listen on for both TCP and UDP connections: | ||
72 | .Dl listen.tcp_udp 0.0.0.0:6969 | ||
73 | .Pp | ||
74 | To specify the address opentracker will listen on for TCP connections: | ||
75 | .Dl listen.tcp 0.0.0.0 | ||
76 | .Pp | ||
77 | To specify the address opentracker will listen on for UDP connections: | ||
78 | .Dl listen.udp 0.0.0.0:6969 | ||
79 | |||
80 | .Sh SEE ALSO | ||
81 | .Xr opentracker 1 | ||
82 | |||
83 | .Sh AUTHOR | ||
84 | .An Dirk Engling | ||
85 | .Aq Mt erdgeist@erdgeist.org | ||
86 | |||
diff --git a/opentracker.c b/opentracker.c index 2bb66fa..14e9989 100644 --- a/opentracker.c +++ b/opentracker.c | |||
@@ -5,59 +5,59 @@ | |||
5 | $Id$ */ | 5 | $Id$ */ |
6 | 6 | ||
7 | /* System */ | 7 | /* System */ |
8 | #include <stdlib.h> | ||
9 | #include <string.h> | ||
10 | #include <arpa/inet.h> | 8 | #include <arpa/inet.h> |
11 | #include <sys/socket.h> | 9 | #include <ctype.h> |
12 | #include <unistd.h> | ||
13 | #include <errno.h> | 10 | #include <errno.h> |
11 | #include <pthread.h> | ||
12 | #include <pwd.h> | ||
14 | #include <signal.h> | 13 | #include <signal.h> |
15 | #include <stdio.h> | 14 | #include <stdio.h> |
16 | #include <pwd.h> | 15 | #include <stdlib.h> |
17 | #include <ctype.h> | 16 | #include <string.h> |
18 | #include <pthread.h> | 17 | #include <sys/socket.h> |
18 | #include <unistd.h> | ||
19 | #ifdef WANT_SYSLOGS | 19 | #ifdef WANT_SYSLOGS |
20 | #include <syslog.h> | 20 | #include <syslog.h> |
21 | #endif | 21 | #endif |
22 | 22 | ||
23 | /* Libowfat */ | 23 | /* Libowfat */ |
24 | #include "socket.h" | 24 | #include "byte.h" |
25 | #include "io.h" | 25 | #include "io.h" |
26 | #include "iob.h" | 26 | #include "iob.h" |
27 | #include "byte.h" | ||
28 | #include "scan.h" | ||
29 | #include "ip6.h" | 27 | #include "ip6.h" |
28 | #include "scan.h" | ||
29 | #include "socket.h" | ||
30 | 30 | ||
31 | /* Opentracker */ | 31 | /* Opentracker */ |
32 | #include "trackerlogic.h" | ||
33 | #include "ot_mutex.h" | ||
34 | #include "ot_http.h" | ||
35 | #include "ot_udp.h" | ||
36 | #include "ot_accesslist.h" | 32 | #include "ot_accesslist.h" |
37 | #include "ot_stats.h" | 33 | #include "ot_http.h" |
38 | #include "ot_livesync.h" | 34 | #include "ot_livesync.h" |
35 | #include "ot_mutex.h" | ||
36 | #include "ot_stats.h" | ||
37 | #include "ot_udp.h" | ||
38 | #include "trackerlogic.h" | ||
39 | 39 | ||
40 | /* Globals */ | 40 | /* Globals */ |
41 | time_t g_now_seconds; | 41 | time_t g_now_seconds; |
42 | char * g_redirecturl; | 42 | char *g_redirecturl; |
43 | uint32_t g_tracker_id; | 43 | uint32_t g_tracker_id; |
44 | volatile int g_opentracker_running = 1; | 44 | volatile int g_opentracker_running = 1; |
45 | int g_self_pipe[2]; | 45 | int g_self_pipe[2]; |
46 | 46 | ||
47 | static char * g_serverdir; | 47 | static char *g_serverdir; |
48 | static char * g_serveruser; | 48 | static char *g_serveruser; |
49 | static unsigned int g_udp_workers; | 49 | static unsigned int g_udp_workers; |
50 | 50 | ||
51 | static void panic( const char *routing ) __attribute__ ((noreturn)); | 51 | static void panic(const char *routine) __attribute__((noreturn)); |
52 | static void panic( const char *routine ) { | 52 | static void panic(const char *routine) { |
53 | fprintf( stderr, "%s: %s\n", routine, strerror(errno) ); | 53 | fprintf(stderr, "%s: %s\n", routine, strerror(errno)); |
54 | exit( 111 ); | 54 | exit(111); |
55 | } | 55 | } |
56 | 56 | ||
57 | static void signal_handler( int s ) { | 57 | static void signal_handler(int s) { |
58 | if( s == SIGINT ) { | 58 | if (s == SIGINT) { |
59 | /* Any new interrupt signal quits the application */ | 59 | /* Any new interrupt signal quits the application */ |
60 | signal( SIGINT, SIG_DFL); | 60 | signal(SIGINT, SIG_DFL); |
61 | 61 | ||
62 | /* Tell all other threads to not acquire any new lock on a bucket | 62 | /* Tell all other threads to not acquire any new lock on a bucket |
63 | but cancel their operations and return */ | 63 | but cancel their operations and return */ |
@@ -69,216 +69,231 @@ static void signal_handler( int s ) { | |||
69 | closelog(); | 69 | closelog(); |
70 | #endif | 70 | #endif |
71 | 71 | ||
72 | exit( 0 ); | 72 | exit(0); |
73 | } else if( s == SIGALRM ) { | ||
74 | /* Maintain our copy of the clock. time() on BSDs is very expensive. */ | ||
75 | g_now_seconds = time(NULL); | ||
76 | alarm(5); | ||
77 | } | 73 | } |
78 | } | 74 | } |
79 | 75 | ||
80 | static void defaul_signal_handlers( void ) { | 76 | static void defaul_signal_handlers(void) { |
81 | sigset_t signal_mask; | 77 | sigset_t signal_mask; |
82 | sigemptyset(&signal_mask); | 78 | sigemptyset(&signal_mask); |
83 | sigaddset (&signal_mask, SIGPIPE); | 79 | sigaddset(&signal_mask, SIGPIPE); |
84 | sigaddset (&signal_mask, SIGHUP); | 80 | sigaddset(&signal_mask, SIGHUP); |
85 | sigaddset (&signal_mask, SIGINT); | 81 | sigaddset(&signal_mask, SIGINT); |
86 | sigaddset (&signal_mask, SIGALRM); | 82 | sigaddset(&signal_mask, SIGALRM); |
87 | pthread_sigmask (SIG_BLOCK, &signal_mask, NULL); | 83 | pthread_sigmask(SIG_BLOCK, &signal_mask, NULL); |
88 | } | 84 | } |
89 | 85 | ||
90 | static void install_signal_handlers( void ) { | 86 | static void install_signal_handlers(void) { |
91 | struct sigaction sa; | 87 | struct sigaction sa; |
92 | sigset_t signal_mask; | 88 | sigset_t signal_mask; |
93 | sigemptyset(&signal_mask); | 89 | sigemptyset(&signal_mask); |
94 | 90 | ||
95 | sa.sa_handler = signal_handler; | 91 | sa.sa_handler = signal_handler; |
96 | sigemptyset(&sa.sa_mask); | 92 | sigemptyset(&sa.sa_mask); |
97 | sa.sa_flags = SA_RESTART; | 93 | sa.sa_flags = SA_RESTART; |
98 | if ((sigaction(SIGINT, &sa, NULL) == -1) || (sigaction(SIGALRM, &sa, NULL) == -1) ) | 94 | if ((sigaction(SIGINT, &sa, NULL) == -1) || (sigaction(SIGALRM, &sa, NULL) == -1)) |
99 | panic( "install_signal_handlers" ); | 95 | panic("install_signal_handlers"); |
100 | 96 | ||
101 | sigaddset (&signal_mask, SIGINT); | 97 | sigaddset(&signal_mask, SIGINT); |
102 | sigaddset (&signal_mask, SIGALRM); | 98 | pthread_sigmask(SIG_UNBLOCK, &signal_mask, NULL); |
103 | pthread_sigmask (SIG_UNBLOCK, &signal_mask, NULL); | ||
104 | } | 99 | } |
105 | 100 | ||
106 | static void usage( char *name ) { | 101 | static void usage(char *name) { |
107 | fprintf( stderr, "Usage: %s [-i ip] [-p port] [-P port] [-r redirect] [-d dir] [-u user] [-A ip] [-f config] [-s livesyncport]" | 102 | fprintf(stderr, |
103 | "Usage: %s [-i ip] [-p port] [-P port] [-r redirect] [-d dir] [-u user] [-A ip[/bits]] [-f config] [-s livesyncport]" | ||
108 | #ifdef WANT_ACCESSLIST_BLACK | 104 | #ifdef WANT_ACCESSLIST_BLACK |
109 | " [-b blacklistfile]" | 105 | " [-b blacklistfile]" |
110 | #elif defined ( WANT_ACCESSLIST_WHITE ) | 106 | #elif defined(WANT_ACCESSLIST_WHITE) |
111 | " [-w whitelistfile]" | 107 | " [-w whitelistfile]" |
112 | #endif | 108 | #endif |
113 | "\n", name ); | 109 | "\n", |
110 | name); | ||
114 | } | 111 | } |
115 | 112 | ||
116 | #define HELPLINE(opt,desc) fprintf(stderr, "\t%-10s%s\n",opt,desc) | 113 | #define HELPLINE(opt, desc) fprintf(stderr, "\t%-10s%s\n", opt, desc) |
117 | static void help( char *name ) { | 114 | static void help(char *name) { |
118 | usage( name ); | 115 | usage(name); |
119 | 116 | ||
120 | HELPLINE("-f config","include and execute the config file"); | 117 | HELPLINE("-f config", "include and execute the config file"); |
121 | HELPLINE("-i ip","specify ip to bind to (default: *, you may specify more than one)"); | 118 | HELPLINE("-i ip", "specify ip to bind to with next -[pP] (default: any, overrides preceeding ones)"); |
122 | HELPLINE("-p port","specify tcp port to bind to (default: 6969, you may specify more than one)"); | 119 | HELPLINE("-p port", "do bind to tcp port (default: 6969, you may specify more than one)"); |
123 | HELPLINE("-P port","specify udp port to bind to (default: 6969, you may specify more than one)"); | 120 | HELPLINE("-P port", "do bind to udp port (default: 6969, you may specify more than one)"); |
124 | HELPLINE("-r redirecturl","specify url where / should be redirected to (default none)"); | 121 | HELPLINE("-r redirecturl", "specify url where / should be redirected to (default none)"); |
125 | HELPLINE("-d dir","specify directory to try to chroot to (default: \".\")"); | 122 | HELPLINE("-d dir", "specify directory to try to chroot to (default: \".\")"); |
126 | HELPLINE("-u user","specify user under whose privileges opentracker should run (default: \"nobody\")"); | 123 | HELPLINE("-u user", "specify user under whose privileges opentracker should run (default: \"nobody\")"); |
127 | HELPLINE("-A ip","bless an ip address as admin address (e.g. to allow syncs from this address)"); | 124 | HELPLINE("-A ip[/bits]", "bless an ip address or net as admin address (e.g. to allow syncs from this address)"); |
128 | #ifdef WANT_ACCESSLIST_BLACK | 125 | #ifdef WANT_ACCESSLIST_BLACK |
129 | HELPLINE("-b file","specify blacklist file."); | 126 | HELPLINE("-b file", "specify blacklist file."); |
130 | #elif defined( WANT_ACCESSLIST_WHITE ) | 127 | #elif defined(WANT_ACCESSLIST_WHITE) |
131 | HELPLINE("-w file","specify whitelist file."); | 128 | HELPLINE("-w file", "specify whitelist file."); |
132 | #endif | 129 | #endif |
133 | 130 | ||
134 | fprintf( stderr, "\nExample: ./opentracker -i 127.0.0.1 -p 6969 -P 6969 -f ./opentracker.conf -i 10.1.1.23 -p 2710 -p 80\n" ); | 131 | fprintf(stderr, "\nExample: ./opentracker -i 127.0.0.1 -p 6969 -P 6969 -f ./opentracker.conf -i 10.1.1.23 -p 2710 -p 80\n"); |
132 | fprintf(stderr, " Here -i 127.0.0.1 selects the ip address for the next -p 6969 and -P 6969.\n"); | ||
133 | fprintf(stderr, " If no port is bound from config file or command line, the last address given\n"); | ||
134 | fprintf(stderr, " (or ::1 if none is set) will be used on port 6969.\n"); | ||
135 | } | 135 | } |
136 | #undef HELPLINE | 136 | #undef HELPLINE |
137 | 137 | ||
138 | static size_t header_complete( char * request, ssize_t byte_count ) { | 138 | static ssize_t header_complete(char *request, ssize_t byte_count) { |
139 | int i = 0, state = 0; | 139 | ssize_t i = 0, state = 0; |
140 | 140 | ||
141 | for( i=1; i < byte_count; i+=2 ) | 141 | for (i = 1; i < byte_count; i += 2) |
142 | if( request[i] <= 13 ) { | 142 | if (request[i] <= 13) { |
143 | i--; | 143 | i--; |
144 | for( state = 0 ; i < byte_count; ++i ) { | 144 | for (state = 0; i < byte_count; ++i) { |
145 | char c = request[i]; | 145 | char c = request[i]; |
146 | if( c == '\r' || c == '\n' ) | 146 | if (c == '\r' || c == '\n') |
147 | state = ( state >> 2 ) | ( ( c << 6 ) & 0xc0 ); | 147 | state = (state >> 2) | ((c << 6) & 0xc0); |
148 | else | 148 | else |
149 | break; | 149 | break; |
150 | if( state >= 0xa0 || state == 0x99 ) return i + 1; | 150 | if (state >= 0xa0 || state == 0x99) |
151 | return i + 1; | ||
151 | } | 152 | } |
152 | } | 153 | } |
153 | return 0; | 154 | return 0; |
154 | } | 155 | } |
155 | 156 | ||
156 | static void handle_dead( const int64 sock ) { | 157 | static void handle_dead(const int64 sock) { |
157 | struct http_data* cookie=io_getcookie( sock ); | 158 | struct http_data *cookie = io_getcookie(sock); |
158 | if( cookie ) { | 159 | if (cookie) { |
159 | size_t i; | 160 | size_t i; |
160 | for ( i = 0; i < cookie->batches; ++i) | 161 | for (i = 0; i < cookie->batches; ++i) |
161 | iob_reset( cookie->batch + i ); | 162 | iob_reset(cookie->batch + i); |
162 | free( cookie->batch ); | 163 | free(cookie->batch); |
163 | array_reset( &cookie->request ); | 164 | array_reset(&cookie->request); |
164 | if( cookie->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK ) | 165 | if (cookie->flag & (STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER)) |
165 | mutex_workqueue_canceltask( sock ); | 166 | mutex_workqueue_canceltask(sock); |
166 | free( cookie ); | 167 | free(cookie); |
167 | } | 168 | } |
168 | io_close( sock ); | 169 | io_close(sock); |
169 | } | 170 | } |
170 | 171 | ||
171 | static void handle_read( const int64 sock, struct ot_workstruct *ws ) { | 172 | static void handle_read(const int64 sock, struct ot_workstruct *ws) { |
172 | struct http_data* cookie = io_getcookie( sock ); | 173 | struct http_data *cookie = io_getcookie(sock); |
173 | ssize_t byte_count = io_tryread( sock, ws->inbuf, G_INBUF_SIZE ); | 174 | ssize_t byte_count = io_tryread(sock, ws->inbuf, G_INBUF_SIZE); |
174 | 175 | ||
175 | if( byte_count == 0 || byte_count == -3 ) { | 176 | if (byte_count == 0 || byte_count == -3) { |
176 | handle_dead( sock ); | 177 | handle_dead(sock); |
177 | return; | 178 | return; |
178 | } | 179 | } |
179 | 180 | ||
181 | if (byte_count == -1) | ||
182 | return; | ||
183 | |||
180 | /* If we get the whole request in one packet, handle it without copying */ | 184 | /* If we get the whole request in one packet, handle it without copying */ |
181 | if( !array_start( &cookie->request ) ) { | 185 | if (!array_start(&cookie->request)) { |
182 | if( ( ws->header_size = header_complete( ws->inbuf, byte_count ) ) ) { | 186 | if ((ws->header_size = header_complete(ws->inbuf, byte_count))) { |
183 | ws->request = ws->inbuf; | 187 | ws->request = ws->inbuf; |
184 | ws->request_size = byte_count; | 188 | ws->request_size = byte_count; |
185 | http_handle_request( sock, ws ); | 189 | http_handle_request(sock, ws); |
186 | } else | 190 | } else |
187 | array_catb( &cookie->request, ws->inbuf, byte_count ); | 191 | array_catb(&cookie->request, ws->inbuf, (size_t)byte_count); |
188 | return; | 192 | return; |
189 | } | 193 | } |
190 | 194 | ||
191 | array_catb( &cookie->request, ws->inbuf, byte_count ); | 195 | array_catb(&cookie->request, ws->inbuf, byte_count); |
192 | if( array_failed( &cookie->request ) || array_bytes( &cookie->request ) > 8192 ) { | 196 | if (array_failed(&cookie->request) || array_bytes(&cookie->request) > 8192) { |
193 | http_issue_error( sock, ws, CODE_HTTPERROR_500 ); | 197 | http_issue_error(sock, ws, CODE_HTTPERROR_500); |
194 | return; | 198 | return; |
195 | } | 199 | } |
196 | 200 | ||
197 | while( ( ws->header_size = header_complete( array_start( &cookie->request ), array_bytes( &cookie->request ) ) ) ) { | 201 | while ((ws->header_size = header_complete(array_start(&cookie->request), array_bytes(&cookie->request)))) { |
198 | ws->request = array_start( &cookie->request ); | 202 | ws->request = array_start(&cookie->request); |
199 | ws->request_size = array_bytes( &cookie->request ); | 203 | ws->request_size = array_bytes(&cookie->request); |
200 | http_handle_request( sock, ws ); | 204 | http_handle_request(sock, ws); |
201 | #ifdef WANT_KEEPALIVE | 205 | #ifdef WANT_KEEPALIVE |
202 | if( !ws->keep_alive ) | 206 | if (!ws->keep_alive) |
203 | #endif | 207 | #endif |
204 | return; | 208 | return; |
205 | } | 209 | } |
206 | } | 210 | } |
207 | 211 | ||
208 | static void handle_write( const int64 sock ) { | 212 | static void handle_write(const int64 sock) { |
209 | struct http_data* cookie=io_getcookie( sock ); | 213 | struct http_data *cookie = io_getcookie(sock); |
210 | size_t i; | 214 | size_t i; |
215 | int chunked = 0; | ||
211 | 216 | ||
212 | /* Look for the first io_batch still containing bytes to write */ | 217 | /* Look for the first io_batch still containing bytes to write */ |
213 | if( cookie ) | 218 | if (cookie) { |
214 | for( i = 0; i < cookie->batches; ++i ) | 219 | if (cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER) |
215 | if( cookie->batch[i].bytesleft ) { | 220 | chunked = 1; |
216 | int64 res = iob_send( sock, cookie->batch + i ); | ||
217 | 221 | ||
218 | if( res == -3 ) | 222 | for (i = 0; i < cookie->batches; ++i) { |
219 | break; | 223 | if (cookie->batch[i].bytesleft) { |
224 | int64 res = iob_send(sock, cookie->batch + i); | ||
220 | 225 | ||
221 | if( !cookie->batch[i].bytesleft ) | 226 | if (res == -3) { |
227 | handle_dead(sock); | ||
228 | return; | ||
229 | } | ||
230 | |||
231 | if (!cookie->batch[i].bytesleft) | ||
222 | continue; | 232 | continue; |
223 | 233 | ||
224 | if( res == -1 || res > 0 || i < cookie->batches - 1 ) | 234 | if (res == -1 || res > 0 || i < cookie->batches - 1) |
225 | return; | 235 | return; |
226 | } | 236 | } |
237 | } | ||
238 | } | ||
227 | 239 | ||
228 | handle_dead( sock ); | 240 | /* In a chunked transfer after all batches accumulated have been sent, wait for the next one */ |
241 | if (chunked) | ||
242 | io_dontwantwrite(sock); | ||
243 | else | ||
244 | handle_dead(sock); | ||
229 | } | 245 | } |
230 | 246 | ||
231 | static void handle_accept( const int64 serversocket ) { | 247 | static void handle_accept(const int64 serversocket) { |
232 | struct http_data *cookie; | 248 | struct http_data *cookie; |
233 | int64 sock; | 249 | int64 sock; |
234 | ot_ip6 ip; | 250 | ot_ip6 ip; |
235 | uint16 port; | 251 | uint16 port; |
236 | tai6464 t; | 252 | tai6464 t; |
237 | 253 | ||
238 | while( ( sock = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) { | 254 | while ((sock = socket_accept6(serversocket, ip, &port, NULL)) != -1) { |
239 | 255 | ||
240 | /* Put fd into a non-blocking mode */ | 256 | /* Put fd into a non-blocking mode */ |
241 | io_nonblock( sock ); | 257 | io_nonblock(sock); |
242 | 258 | ||
243 | if( !io_fd( sock ) || | 259 | if (!io_fd(sock) || !(cookie = (struct http_data *)malloc(sizeof(struct http_data)))) { |
244 | !( cookie = (struct http_data*)malloc( sizeof(struct http_data) ) ) ) { | 260 | io_close(sock); |
245 | io_close( sock ); | ||
246 | continue; | 261 | continue; |
247 | } | 262 | } |
248 | memset(cookie, 0, sizeof( struct http_data ) ); | 263 | memset(cookie, 0, sizeof(struct http_data)); |
249 | memcpy(cookie->ip,ip,sizeof(ot_ip6)); | 264 | memcpy(cookie->ip, ip, sizeof(ot_ip6)); |
250 | 265 | ||
251 | io_setcookie( sock, cookie ); | 266 | io_setcookie(sock, cookie); |
252 | io_wantread( sock ); | 267 | io_wantread(sock); |
253 | 268 | ||
254 | stats_issue_event( EVENT_ACCEPT, FLAG_TCP, (uintptr_t)ip); | 269 | stats_issue_event(EVENT_ACCEPT, FLAG_TCP, (uintptr_t)ip); |
255 | 270 | ||
256 | /* That breaks taia encapsulation. But there is no way to take system | 271 | /* That breaks taia encapsulation. But there is no way to take system |
257 | time this often in FreeBSD and libowfat does not allow to set unix time */ | 272 | time this often in FreeBSD and libowfat does not allow to set unix time */ |
258 | taia_uint( &t, 0 ); /* Clear t */ | 273 | taia_uint(&t, 0); /* Clear t */ |
259 | tai_unix( &(t.sec), (g_now_seconds + OT_CLIENT_TIMEOUT) ); | 274 | tai_unix(&(t.sec), (g_now_seconds + OT_CLIENT_TIMEOUT)); |
260 | io_timeout( sock, t ); | 275 | io_timeout(sock, t); |
261 | } | 276 | } |
262 | io_eagain(serversocket); | 277 | io_eagain(serversocket); |
263 | } | 278 | } |
264 | 279 | ||
265 | static void * server_mainloop( void * args ) { | 280 | static void *server_mainloop(void *args) { |
266 | struct ot_workstruct ws; | 281 | struct ot_workstruct ws; |
267 | time_t next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL; | 282 | time_t next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL; |
268 | struct iovec *iovector; | 283 | struct iovec *iovector; |
269 | int iovec_entries; | 284 | int iovec_entries, is_partial; |
270 | 285 | ||
271 | (void)args; | 286 | (void)args; |
272 | 287 | ||
273 | /* Initialize our "thread local storage" */ | 288 | /* Initialize our "thread local storage" */ |
274 | ws.inbuf = malloc( G_INBUF_SIZE ); | 289 | ws.inbuf = malloc(G_INBUF_SIZE); |
275 | ws.outbuf = malloc( G_OUTBUF_SIZE ); | 290 | ws.outbuf = malloc(G_OUTBUF_SIZE); |
276 | #ifdef _DEBUG_HTTPERROR | 291 | #ifdef _DEBUG_HTTPERROR |
277 | ws.debugbuf= malloc( G_DEBUGBUF_SIZE ); | 292 | ws.debugbuf = malloc(G_DEBUGBUF_SIZE); |
278 | #endif | 293 | #endif |
279 | 294 | ||
280 | if( !ws.inbuf || !ws.outbuf ) | 295 | if (!ws.inbuf || !ws.outbuf) |
281 | panic( "Initializing worker failed" ); | 296 | panic("Initializing worker failed"); |
282 | 297 | ||
283 | #ifdef WANT_ARC4RANDOM | 298 | #ifdef WANT_ARC4RANDOM |
284 | arc4random_buf(&ws.rand48_state[0], 3 * sizeof(uint16_t)); | 299 | arc4random_buf(&ws.rand48_state[0], 3 * sizeof(uint16_t)); |
@@ -288,300 +303,332 @@ static void * server_mainloop( void * args ) { | |||
288 | ws.rand48_state[2] = (uint16_t)random(); | 303 | ws.rand48_state[2] = (uint16_t)random(); |
289 | #endif | 304 | #endif |
290 | 305 | ||
291 | for( ; ; ) { | 306 | for (;;) { |
292 | int64 sock; | 307 | int64 sock; |
293 | 308 | ||
294 | io_wait(); | 309 | io_wait(); |
295 | 310 | ||
296 | while( ( sock = io_canread( ) ) != -1 ) { | 311 | while ((sock = io_canread()) != -1) { |
297 | const void *cookie = io_getcookie( sock ); | 312 | const void *cookie = io_getcookie(sock); |
298 | if( (intptr_t)cookie == FLAG_TCP ) | 313 | if ((intptr_t)cookie == FLAG_TCP) |
299 | handle_accept( sock ); | 314 | handle_accept(sock); |
300 | else if( (intptr_t)cookie == FLAG_UDP ) | 315 | else if ((intptr_t)cookie == FLAG_UDP) |
301 | handle_udp6( sock, &ws ); | 316 | handle_udp6(sock, &ws); |
302 | else if( (intptr_t)cookie == FLAG_SELFPIPE ) | 317 | else if ((intptr_t)cookie == FLAG_SELFPIPE) |
303 | io_tryread( sock, ws.inbuf, G_INBUF_SIZE ); | 318 | io_tryread(sock, ws.inbuf, G_INBUF_SIZE); |
304 | else | 319 | else |
305 | handle_read( sock, &ws ); | 320 | handle_read(sock, &ws); |
306 | } | 321 | } |
307 | 322 | ||
308 | while( ( sock = mutex_workqueue_popresult( &iovec_entries, &iovector ) ) != -1 ) | 323 | while ((sock = mutex_workqueue_popresult(&iovec_entries, &iovector, &is_partial)) != -1) |
309 | http_sendiovecdata( sock, &ws, iovec_entries, iovector ); | 324 | http_sendiovecdata(sock, &ws, iovec_entries, iovector, is_partial); |
310 | 325 | ||
311 | while( ( sock = io_canwrite( ) ) != -1 ) | 326 | while ((sock = io_canwrite()) != -1) |
312 | handle_write( sock ); | 327 | handle_write(sock); |
313 | 328 | ||
314 | if( g_now_seconds > next_timeout_check ) { | 329 | if (g_now_seconds > next_timeout_check) { |
315 | while( ( sock = io_timeouted() ) != -1 ) | 330 | while ((sock = io_timeouted()) != -1) |
316 | handle_dead( sock ); | 331 | handle_dead(sock); |
317 | next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL; | 332 | next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL; |
318 | } | 333 | } |
319 | 334 | ||
320 | livesync_ticker(); | 335 | livesync_ticker(); |
321 | |||
322 | /* Enforce setting the clock */ | ||
323 | signal_handler( SIGALRM ); | ||
324 | } | 336 | } |
325 | return 0; | 337 | return 0; |
326 | } | 338 | } |
327 | 339 | ||
328 | static int64_t ot_try_bind( ot_ip6 ip, uint16_t port, PROTO_FLAG proto ) { | 340 | static int64_t ot_try_bind(ot_ip6 ip, uint16_t port, PROTO_FLAG proto) { |
329 | int64 sock = proto == FLAG_TCP ? socket_tcp6( ) : socket_udp6( ); | 341 | int64 sock = proto == FLAG_TCP ? socket_tcp6() : socket_udp6(); |
330 | |||
331 | #ifndef WANT_V6 | ||
332 | if( !ip6_isv4mapped(ip) ) { | ||
333 | exerr( "V4 Tracker is V4 only!" ); | ||
334 | } | ||
335 | #else | ||
336 | if( ip6_isv4mapped(ip) ) { | ||
337 | exerr( "V6 Tracker is V6 only!" ); | ||
338 | } | ||
339 | #endif | ||
340 | 342 | ||
341 | #ifdef _DEBUG | 343 | #ifdef _DEBUG |
342 | { | 344 | { |
343 | char *protos[] = {"TCP","UDP","UDP mcast"}; | 345 | char *protos[] = {"TCP", "UDP", "UDP mcast"}; |
344 | char _debug[512]; | 346 | char _debug[512]; |
345 | int off = snprintf( _debug, sizeof(_debug), "Binding socket type %s to address [", protos[proto] ); | 347 | int off = snprintf(_debug, sizeof(_debug), "Binding socket type %s to address [", protos[proto]); |
346 | off += fmt_ip6c( _debug+off, ip); | 348 | off += fmt_ip6c(_debug + off, ip); |
347 | snprintf( _debug + off, sizeof(_debug)-off, "]:%d...", port); | 349 | snprintf(_debug + off, sizeof(_debug) - off, "]:%d...", port); |
348 | fputs( _debug, stderr ); | 350 | fputs(_debug, stderr); |
349 | } | 351 | } |
350 | #endif | 352 | #endif |
351 | 353 | ||
352 | if( socket_bind6_reuse( sock, ip, port, 0 ) == -1 ) | 354 | if (socket_bind6_reuse(sock, ip, port, 0) == -1) |
353 | panic( "socket_bind6_reuse" ); | 355 | panic("socket_bind6_reuse"); |
354 | 356 | ||
355 | if( ( proto == FLAG_TCP ) && ( socket_listen( sock, SOMAXCONN) == -1 ) ) | 357 | if ((proto == FLAG_TCP) && (socket_listen(sock, SOMAXCONN) == -1)) |
356 | panic( "socket_listen" ); | 358 | panic("socket_listen"); |
357 | 359 | ||
358 | if( !io_fd( sock ) ) | 360 | if (!io_fd(sock)) |
359 | panic( "io_fd" ); | 361 | panic("io_fd"); |
360 | 362 | ||
361 | io_setcookie( sock, (void*)proto ); | 363 | io_setcookie(sock, (void *)proto); |
362 | 364 | ||
363 | if( (proto == FLAG_UDP) && g_udp_workers ) { | 365 | if ((proto == FLAG_UDP) && g_udp_workers) { |
364 | io_block( sock ); | 366 | io_block(sock); |
365 | udp_init( sock, g_udp_workers ); | 367 | udp_init(sock, g_udp_workers); |
366 | } else | 368 | } else |
367 | io_wantread( sock ); | 369 | io_wantread(sock); |
368 | 370 | ||
369 | #ifdef _DEBUG | 371 | #ifdef _DEBUG |
370 | fputs( " success.\n", stderr); | 372 | fputs(" success.\n", stderr); |
371 | #endif | 373 | #endif |
372 | 374 | ||
373 | return sock; | 375 | return sock; |
374 | } | 376 | } |
375 | 377 | ||
376 | char * set_config_option( char **option, char *value ) { | 378 | char *set_config_option(char **option, char *value) { |
377 | #ifdef _DEBUG | 379 | #ifdef _DEBUG |
378 | fprintf( stderr, "Setting config option: %s\n", value ); | 380 | fprintf(stderr, "Setting config option: %s\n", value); |
379 | #endif | 381 | #endif |
380 | while( isspace(*value) ) ++value; | 382 | while (isspace(*value)) |
381 | free( *option ); | 383 | ++value; |
382 | return *option = strdup( value ); | 384 | free(*option); |
385 | return *option = strdup(value); | ||
383 | } | 386 | } |
384 | 387 | ||
385 | static int scan_ip6_port( const char *src, ot_ip6 ip, uint16 *port ) { | 388 | static int scan_ip6_port(const char *src, ot_ip6 ip, uint16 *port) { |
386 | const char *s = src; | 389 | const char *s = src; |
387 | int off, bracket = 0; | 390 | int off, bracket = 0; |
388 | while( isspace(*s) ) ++s; | 391 | while (isspace(*s)) |
389 | if( *s == '[' ) ++s, ++bracket; /* for v6 style notation */ | 392 | ++s; |
390 | if( !(off = scan_ip6( s, ip ) ) ) | 393 | if (*s == '[') |
394 | ++s, ++bracket; /* for v6 style notation */ | ||
395 | if (!(off = scan_ip6(s, ip))) | ||
391 | return 0; | 396 | return 0; |
392 | s += off; | 397 | s += off; |
393 | if( bracket && *s == ']' ) ++s; | 398 | if (bracket && *s == ']') |
394 | if( *s == 0 || isspace(*s)) return s-src; | 399 | ++s; |
395 | if( !ip6_isv4mapped(ip)){ | 400 | if (*s == 0 || isspace(*s)) |
396 | if( *s != ':' && *s != '.' ) return 0; | 401 | return s - src; |
397 | if( !bracket && *(s) == ':' ) return 0; | 402 | if (!ip6_isv4mapped(ip)) { |
403 | if (*s != ':' && *s != '.') | ||
404 | return 0; | ||
405 | if (!bracket && *(s) == ':') | ||
406 | return 0; | ||
398 | s++; | 407 | s++; |
399 | } else { | 408 | } else { |
400 | if( *(s++) != ':' ) return 0; | 409 | if (*(s++) != ':') |
410 | return 0; | ||
401 | } | 411 | } |
402 | if( !(off = scan_ushort (s, port ) ) ) | 412 | if (!(off = scan_ushort(s, port))) |
403 | return 0; | 413 | return 0; |
404 | return off+s-src; | 414 | return off + s - src; |
405 | } | 415 | } |
406 | 416 | ||
407 | int parse_configfile( char * config_filename ) { | 417 | static int scan_ip6_net(const char *src, ot_net *net) { |
408 | FILE * accesslist_filehandle; | 418 | const char *s = src; |
409 | char inbuf[512]; | 419 | int off; |
410 | ot_ip6 tmpip; | 420 | while (isspace(*s)) |
411 | int bound = 0; | 421 | ++s; |
422 | if (!(off = scan_ip6(s, net->address))) | ||
423 | return 0; | ||
424 | s += off; | ||
425 | if (*s != '/') | ||
426 | net->bits = 128; | ||
427 | else { | ||
428 | s++; | ||
429 | if (!(off = scan_int(s, &net->bits))) | ||
430 | return 0; | ||
431 | if (ip6_isv4mapped(net->address)) | ||
432 | net->bits += 96; | ||
433 | if (net->bits > 128) | ||
434 | return 0; | ||
435 | s += off; | ||
436 | } | ||
437 | return off + s - src; | ||
438 | } | ||
412 | 439 | ||
413 | accesslist_filehandle = fopen( config_filename, "r" ); | 440 | int parse_configfile(char *config_filename) { |
441 | FILE *accesslist_filehandle; | ||
442 | char inbuf[512]; | ||
443 | ot_ip6 tmpip; | ||
444 | #if defined(WANT_RESTRICT_STATS) || defined(WANT_IP_FROM_PROXY) || defined(WANT_SYNC_LIVE) | ||
445 | ot_net tmpnet; | ||
446 | #endif | ||
447 | int bound = 0; | ||
414 | 448 | ||
415 | if( accesslist_filehandle == NULL ) { | 449 | accesslist_filehandle = fopen(config_filename, "r"); |
416 | fprintf( stderr, "Warning: Can't open config file: %s.", config_filename ); | 450 | |
451 | if (accesslist_filehandle == NULL) { | ||
452 | fprintf(stderr, "Warning: Can't open config file: %s.", config_filename); | ||
417 | return 0; | 453 | return 0; |
418 | } | 454 | } |
419 | 455 | ||
420 | while( fgets( inbuf, sizeof(inbuf), accesslist_filehandle ) ) { | 456 | while (fgets(inbuf, sizeof(inbuf), accesslist_filehandle)) { |
421 | char *p = inbuf; | 457 | char *p = inbuf; |
422 | size_t strl; | 458 | size_t strl; |
423 | 459 | ||
424 | /* Skip white spaces */ | 460 | /* Skip white spaces */ |
425 | while(isspace(*p)) ++p; | 461 | while (isspace(*p)) |
462 | ++p; | ||
426 | 463 | ||
427 | /* Ignore comments and empty lines */ | 464 | /* Ignore comments and empty lines */ |
428 | if((*p=='#')||(*p=='\n')||(*p==0)) continue; | 465 | if ((*p == '#') || (*p == '\n') || (*p == 0)) |
466 | continue; | ||
429 | 467 | ||
430 | /* consume trailing new lines and spaces */ | 468 | /* consume trailing new lines and spaces */ |
431 | strl = strlen(p); | 469 | strl = strlen(p); |
432 | while( strl && isspace(p[strl-1])) | 470 | while (strl && isspace(p[strl - 1])) |
433 | p[--strl] = 0; | 471 | p[--strl] = 0; |
434 | 472 | ||
435 | /* Scan for commands */ | 473 | /* Scan for commands */ |
436 | if(!byte_diff(p,15,"tracker.rootdir" ) && isspace(p[15])) { | 474 | if (!byte_diff(p, 15, "tracker.rootdir") && isspace(p[15])) { |
437 | set_config_option( &g_serverdir, p+16 ); | 475 | set_config_option(&g_serverdir, p + 16); |
438 | } else if(!byte_diff(p,12,"tracker.user" ) && isspace(p[12])) { | 476 | } else if (!byte_diff(p, 12, "tracker.user") && isspace(p[12])) { |
439 | set_config_option( &g_serveruser, p+13 ); | 477 | set_config_option(&g_serveruser, p + 13); |
440 | } else if(!byte_diff(p,14,"listen.tcp_udp" ) && isspace(p[14])) { | 478 | } else if (!byte_diff(p, 14, "listen.tcp_udp") && isspace(p[14])) { |
441 | uint16_t tmpport = 6969; | 479 | uint16_t tmpport = 6969; |
442 | if( !scan_ip6_port( p+15, tmpip, &tmpport )) goto parse_error; | 480 | if (!scan_ip6_port(p + 15, tmpip, &tmpport)) |
443 | ot_try_bind( tmpip, tmpport, FLAG_TCP ); ++bound; | 481 | goto parse_error; |
444 | ot_try_bind( tmpip, tmpport, FLAG_UDP ); ++bound; | 482 | ot_try_bind(tmpip, tmpport, FLAG_TCP); |
445 | } else if(!byte_diff(p,10,"listen.tcp" ) && isspace(p[10])) { | 483 | ++bound; |
484 | ot_try_bind(tmpip, tmpport, FLAG_UDP); | ||
485 | ++bound; | ||
486 | } else if (!byte_diff(p, 10, "listen.tcp") && isspace(p[10])) { | ||
446 | uint16_t tmpport = 6969; | 487 | uint16_t tmpport = 6969; |
447 | if( !scan_ip6_port( p+11, tmpip, &tmpport )) goto parse_error; | 488 | if (!scan_ip6_port(p + 11, tmpip, &tmpport)) |
448 | ot_try_bind( tmpip, tmpport, FLAG_TCP ); | 489 | goto parse_error; |
490 | ot_try_bind(tmpip, tmpport, FLAG_TCP); | ||
449 | ++bound; | 491 | ++bound; |
450 | } else if(!byte_diff(p, 10, "listen.udp" ) && isspace(p[10])) { | 492 | } else if (!byte_diff(p, 10, "listen.udp") && isspace(p[10])) { |
451 | uint16_t tmpport = 6969; | 493 | uint16_t tmpport = 6969; |
452 | if( !scan_ip6_port( p+11, tmpip, &tmpport )) goto parse_error; | 494 | if (!scan_ip6_port(p + 11, tmpip, &tmpport)) |
453 | ot_try_bind( tmpip, tmpport, FLAG_UDP ); | 495 | goto parse_error; |
496 | ot_try_bind(tmpip, tmpport, FLAG_UDP); | ||
454 | ++bound; | 497 | ++bound; |
455 | } else if(!byte_diff(p,18,"listen.udp.workers" ) && isspace(p[18])) { | 498 | } else if (!byte_diff(p, 18, "listen.udp.workers") && isspace(p[18])) { |
456 | char *value = p + 18; | 499 | char *value = p + 18; |
457 | while( isspace(*value) ) ++value; | 500 | while (isspace(*value)) |
458 | scan_uint( value, &g_udp_workers ); | 501 | ++value; |
502 | scan_uint(value, &g_udp_workers); | ||
459 | #ifdef WANT_ACCESSLIST_WHITE | 503 | #ifdef WANT_ACCESSLIST_WHITE |
460 | } else if(!byte_diff(p, 16, "access.whitelist" ) && isspace(p[16])) { | 504 | } else if (!byte_diff(p, 16, "access.whitelist") && isspace(p[16])) { |
461 | set_config_option( &g_accesslist_filename, p+17 ); | 505 | set_config_option(&g_accesslist_filename, p + 17); |
462 | #elif defined( WANT_ACCESSLIST_BLACK ) | 506 | #elif defined(WANT_ACCESSLIST_BLACK) |
463 | } else if(!byte_diff(p, 16, "access.blacklist" ) && isspace(p[16])) { | 507 | } else if (!byte_diff(p, 16, "access.blacklist") && isspace(p[16])) { |
464 | set_config_option( &g_accesslist_filename, p+17 ); | 508 | set_config_option(&g_accesslist_filename, p + 17); |
509 | #endif | ||
510 | #ifdef WANT_DYNAMIC_ACCESSLIST | ||
511 | } else if (!byte_diff(p, 15, "access.fifo_add") && isspace(p[15])) { | ||
512 | set_config_option(&g_accesslist_pipe_add, p + 16); | ||
513 | } else if (!byte_diff(p, 18, "access.fifo_delete") && isspace(p[18])) { | ||
514 | set_config_option(&g_accesslist_pipe_delete, p + 19); | ||
465 | #endif | 515 | #endif |
466 | #ifdef WANT_RESTRICT_STATS | 516 | #ifdef WANT_RESTRICT_STATS |
467 | } else if(!byte_diff(p, 12, "access.stats" ) && isspace(p[12])) { | 517 | } else if (!byte_diff(p, 12, "access.stats") && isspace(p[12])) { |
468 | if( !scan_ip6( p+13, tmpip )) goto parse_error; | 518 | if (!scan_ip6_net(p + 13, &tmpnet)) |
469 | accesslist_blessip( tmpip, OT_PERMISSION_MAY_STAT ); | 519 | goto parse_error; |
520 | accesslist_bless_net(&tmpnet, OT_PERMISSION_MAY_STAT); | ||
470 | #endif | 521 | #endif |
471 | } else if(!byte_diff(p, 17, "access.stats_path" ) && isspace(p[17])) { | 522 | } else if (!byte_diff(p, 17, "access.stats_path") && isspace(p[17])) { |
472 | set_config_option( &g_stats_path, p+18 ); | 523 | set_config_option(&g_stats_path, p + 18); |
473 | #ifdef WANT_IP_FROM_PROXY | 524 | #ifdef WANT_IP_FROM_PROXY |
474 | } else if(!byte_diff(p, 12, "access.proxy" ) && isspace(p[12])) { | 525 | } else if (!byte_diff(p, 12, "access.proxy") && isspace(p[12])) { |
475 | if( !scan_ip6( p+13, tmpip )) goto parse_error; | 526 | if (!scan_ip6_net(p + 13, &tmpnet)) |
476 | accesslist_blessip( tmpip, OT_PERMISSION_MAY_PROXY ); | 527 | goto parse_error; |
528 | accesslist_bless_net(&tmpnet, OT_PERMISSION_MAY_PROXY); | ||
477 | #endif | 529 | #endif |
478 | } else if(!byte_diff(p, 20, "tracker.redirect_url" ) && isspace(p[20])) { | 530 | } else if (!byte_diff(p, 20, "tracker.redirect_url") && isspace(p[20])) { |
479 | set_config_option( &g_redirecturl, p+21 ); | 531 | set_config_option(&g_redirecturl, p + 21); |
480 | #ifdef WANT_SYNC_LIVE | 532 | #ifdef WANT_SYNC_LIVE |
481 | } else if(!byte_diff(p, 24, "livesync.cluster.node_ip" ) && isspace(p[24])) { | 533 | } else if (!byte_diff(p, 24, "livesync.cluster.node_ip") && isspace(p[24])) { |
482 | if( !scan_ip6( p+25, tmpip )) goto parse_error; | 534 | if (!scan_ip6_net(p + 25, &tmpnet)) |
483 | accesslist_blessip( tmpip, OT_PERMISSION_MAY_LIVESYNC ); | 535 | goto parse_error; |
484 | } else if(!byte_diff(p, 23, "livesync.cluster.listen" ) && isspace(p[23])) { | 536 | accesslist_bless_net(&tmpnet, OT_PERMISSION_MAY_LIVESYNC); |
537 | } else if (!byte_diff(p, 23, "livesync.cluster.listen") && isspace(p[23])) { | ||
485 | uint16_t tmpport = LIVESYNC_PORT; | 538 | uint16_t tmpport = LIVESYNC_PORT; |
486 | if( !scan_ip6_port( p+24, tmpip, &tmpport )) goto parse_error; | 539 | if (!scan_ip6_port(p + 24, tmpip, &tmpport)) |
487 | livesync_bind_mcast( tmpip, tmpport ); | 540 | goto parse_error; |
541 | livesync_bind_mcast(tmpip, tmpport); | ||
488 | #endif | 542 | #endif |
489 | } else | 543 | } else |
490 | fprintf( stderr, "Unhandled line in config file: %s\n", inbuf ); | 544 | fprintf(stderr, "Unhandled line in config file: %s\n", inbuf); |
491 | continue; | 545 | continue; |
492 | parse_error: | 546 | parse_error: |
493 | fprintf( stderr, "Parse error in config file: %s\n", inbuf); | 547 | fprintf(stderr, "Parse error in config file: %s\n", inbuf); |
494 | } | 548 | } |
495 | fclose( accesslist_filehandle ); | 549 | fclose(accesslist_filehandle); |
496 | return bound; | 550 | return bound; |
497 | } | 551 | } |
498 | 552 | ||
499 | void load_state(const char * const state_filename ) { | 553 | void load_state(const char *const state_filename) { |
500 | FILE * state_filehandle; | 554 | FILE *state_filehandle; |
501 | char inbuf[512]; | 555 | char inbuf[512]; |
502 | ot_hash infohash; | 556 | ot_hash infohash; |
503 | unsigned long long base, downcount; | 557 | unsigned long long base, downcount; |
504 | int consumed; | 558 | int consumed; |
505 | 559 | ||
506 | state_filehandle = fopen( state_filename, "r" ); | 560 | state_filehandle = fopen(state_filename, "r"); |
507 | 561 | ||
508 | if( state_filehandle == NULL ) { | 562 | if (state_filehandle == NULL) { |
509 | fprintf( stderr, "Warning: Can't open config file: %s.", state_filename ); | 563 | fprintf(stderr, "Warning: Can't open config file: %s.", state_filename); |
510 | return; | 564 | return; |
511 | } | 565 | } |
512 | 566 | ||
513 | /* We do ignore anything that is not of the form "^[[:xdigit:]]{40}:\d+:\d+" */ | 567 | /* We do ignore anything that is not of the form "^[[:xdigit:]]{40}:\d+:\d+" */ |
514 | while( fgets( inbuf, sizeof(inbuf), state_filehandle ) ) { | 568 | while (fgets(inbuf, sizeof(inbuf), state_filehandle)) { |
515 | int i; | 569 | int i; |
516 | for( i=0; i<(int)sizeof(ot_hash); ++i ) { | 570 | for (i = 0; i < (int)sizeof(ot_hash); ++i) { |
517 | int eger = 16 * scan_fromhex( inbuf[ 2*i ] ) + scan_fromhex( inbuf[ 1 + 2*i ] ); | 571 | int eger = 16 * scan_fromhex(inbuf[2 * i]) + scan_fromhex(inbuf[1 + 2 * i]); |
518 | if( eger < 0 ) | 572 | if (eger < 0) |
519 | continue; | 573 | continue; |
520 | infohash[i] = eger; | 574 | infohash[i] = eger; |
521 | } | 575 | } |
522 | 576 | ||
523 | if( i != (int)sizeof(ot_hash) ) continue; | 577 | if (i != (int)sizeof(ot_hash)) |
578 | continue; | ||
524 | i *= 2; | 579 | i *= 2; |
525 | 580 | ||
526 | if( inbuf[ i++ ] != ':' || !( consumed = scan_ulonglong( inbuf+i, &base ) ) ) continue; | 581 | if (inbuf[i++] != ':' || !(consumed = scan_ulonglong(inbuf + i, &base))) |
582 | continue; | ||
527 | i += consumed; | 583 | i += consumed; |
528 | if( inbuf[ i++ ] != ':' || !( consumed = scan_ulonglong( inbuf+i, &downcount ) ) ) continue; | 584 | if (inbuf[i++] != ':' || !(consumed = scan_ulonglong(inbuf + i, &downcount))) |
529 | add_torrent_from_saved_state( infohash, base, downcount ); | 585 | continue; |
586 | add_torrent_from_saved_state(infohash, base, downcount); | ||
530 | } | 587 | } |
531 | 588 | ||
532 | fclose( state_filehandle ); | 589 | fclose(state_filehandle); |
533 | } | 590 | } |
534 | 591 | ||
535 | int drop_privileges ( const char * const serveruser, const char * const serverdir ) { | 592 | int drop_privileges(const char *const serveruser, const char *const serverdir) { |
536 | struct passwd *pws = NULL; | 593 | struct passwd *pws = NULL; |
537 | 594 | ||
538 | #ifdef _DEBUG | 595 | #ifdef _DEBUG |
539 | if( !geteuid() ) | 596 | if (!geteuid()) |
540 | fprintf( stderr, "Dropping to user %s.\n", serveruser ); | 597 | fprintf(stderr, "Dropping to user %s.\n", serveruser); |
541 | if( serverdir ) | 598 | if (serverdir) |
542 | fprintf( stderr, "ch%s'ing to directory %s.\n", geteuid() ? "dir" : "root", serverdir ); | 599 | fprintf(stderr, "ch%s'ing to directory %s.\n", geteuid() ? "dir" : "root", serverdir); |
543 | #endif | 600 | #endif |
544 | 601 | ||
545 | /* Grab pws entry before chrooting */ | 602 | /* Grab pws entry before chrooting */ |
546 | pws = getpwnam( serveruser ); | 603 | pws = getpwnam(serveruser); |
547 | endpwent(); | 604 | endpwent(); |
548 | 605 | ||
549 | if( geteuid() == 0 ) { | 606 | if (geteuid() == 0) { |
550 | /* Running as root: chroot and drop privileges */ | 607 | /* Running as root: chroot and drop privileges */ |
551 | if( serverdir && chroot( serverdir ) ) { | 608 | if (serverdir && chroot(serverdir)) { |
552 | fprintf( stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno) ); | 609 | fprintf(stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno)); |
553 | return -1; | 610 | return -1; |
554 | } | 611 | } |
555 | 612 | ||
556 | if(chdir("/")) | 613 | if (chdir("/")) |
557 | panic("chdir() failed after chrooting: "); | 614 | panic("chdir() failed after chrooting: "); |
558 | 615 | ||
559 | /* If we can't find server user, revert to nobody's default uid */ | 616 | /* If we can't find server user, revert to nobody's default uid */ |
560 | if( !pws ) { | 617 | if (!pws) { |
561 | fprintf( stderr, "Warning: Could not get password entry for %s. Reverting to uid -2.\n", serveruser ); | 618 | fprintf(stderr, "Warning: Could not get password entry for %s. Reverting to uid -2.\n", serveruser); |
562 | if (!setegid( (gid_t)-2 ) || | 619 | if (setegid((gid_t)-2) || setgid((gid_t)-2) || setuid((uid_t)-2) || seteuid((uid_t)-2)) |
563 | !setgid( (gid_t)-2 ) || | ||
564 | !setuid( (uid_t)-2 ) || | ||
565 | !seteuid( (uid_t)-2 )) { | ||
566 | panic("Could not set uid to value -2"); | 620 | panic("Could not set uid to value -2"); |
567 | } | 621 | } else { |
568 | } | 622 | if (setegid(pws->pw_gid) || setgid(pws->pw_gid) || setuid(pws->pw_uid) || seteuid(pws->pw_uid)) |
569 | else { | ||
570 | if (!setegid( pws->pw_gid ) || | ||
571 | !setgid( pws->pw_gid ) || | ||
572 | !setuid( pws->pw_uid ) || | ||
573 | !seteuid( pws->pw_uid )) { | ||
574 | panic("Could not set uid to specified value"); | 623 | panic("Could not set uid to specified value"); |
575 | } | ||
576 | } | 624 | } |
577 | 625 | ||
578 | if( geteuid() == 0 || getegid() == 0 ) | 626 | if (geteuid() == 0 || getegid() == 0) |
579 | panic("Still running with root privileges?!"); | 627 | panic("Still running with root privileges?!"); |
580 | } | 628 | } else { |
581 | else { | ||
582 | /* Normal user, just chdir() */ | 629 | /* Normal user, just chdir() */ |
583 | if( serverdir && chdir( serverdir ) ) { | 630 | if (serverdir && chdir(serverdir)) { |
584 | fprintf( stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno) ); | 631 | fprintf(stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno)); |
585 | return -1; | 632 | return -1; |
586 | } | 633 | } |
587 | } | 634 | } |
@@ -589,118 +636,173 @@ int drop_privileges ( const char * const serveruser, const char * const serverdi | |||
589 | return 0; | 636 | return 0; |
590 | } | 637 | } |
591 | 638 | ||
592 | int main( int argc, char **argv ) { | 639 | /* Maintain our copy of the clock. time() on BSDs is very expensive. */ |
593 | ot_ip6 serverip, tmpip; | 640 | static void *time_caching_worker(void *args) { |
594 | int bound = 0, scanon = 1; | 641 | (void)args; |
595 | uint16_t tmpport; | 642 | while (1) { |
596 | char * statefile = 0; | 643 | g_now_seconds = time(NULL); |
644 | sleep(5); | ||
645 | } | ||
646 | return NULL; | ||
647 | } | ||
597 | 648 | ||
598 | memset( serverip, 0, sizeof(ot_ip6) ); | 649 | int main(int argc, char **argv) { |
599 | #ifndef WANT_V6 | 650 | ot_ip6 serverip; |
600 | serverip[10]=serverip[11]=-1; | 651 | ot_net tmpnet; |
601 | noipv6=1; | 652 | int bound = 0, scanon = 1; |
653 | uint16_t tmpport; | ||
654 | char *statefile = 0; | ||
655 | pthread_t thread_id; /* time cacher */ | ||
656 | |||
657 | memset(serverip, 0, sizeof(ot_ip6)); | ||
658 | #ifdef WANT_V4_ONLY | ||
659 | serverip[10] = serverip[11] = -1; | ||
602 | #endif | 660 | #endif |
603 | 661 | ||
604 | #ifdef WANT_DEV_RANDOM | 662 | #ifdef WANT_DEV_RANDOM |
605 | srandomdev(); | 663 | srandomdev(); |
606 | #else | 664 | #else |
607 | srandom( time(NULL) ); | 665 | srandom(time(NULL)); |
608 | #endif | 666 | #endif |
609 | 667 | ||
610 | while( scanon ) { | 668 | while (scanon) { |
611 | switch( getopt( argc, argv, ":i:p:A:P:d:u:r:s:f:l:v" | 669 | switch (getopt(argc, argv, |
670 | ":i:p:A:P:d:u:r:s:f:l:v" | ||
612 | #ifdef WANT_ACCESSLIST_BLACK | 671 | #ifdef WANT_ACCESSLIST_BLACK |
613 | "b:" | 672 | "b:" |
614 | #elif defined( WANT_ACCESSLIST_WHITE ) | 673 | #elif defined(WANT_ACCESSLIST_WHITE) |
615 | "w:" | 674 | "w:" |
616 | #endif | 675 | #endif |
617 | "h" ) ) { | 676 | "h")) { |
618 | case -1 : scanon = 0; break; | 677 | case -1: |
619 | case 'i': | 678 | scanon = 0; |
620 | if( !scan_ip6( optarg, serverip )) { usage( argv[0] ); exit( 1 ); } | 679 | break; |
621 | break; | 680 | case 'i': |
681 | if (!scan_ip6(optarg, serverip)) { | ||
682 | usage(argv[0]); | ||
683 | exit(1); | ||
684 | } | ||
685 | break; | ||
622 | #ifdef WANT_ACCESSLIST_BLACK | 686 | #ifdef WANT_ACCESSLIST_BLACK |
623 | case 'b': set_config_option( &g_accesslist_filename, optarg); break; | 687 | case 'b': |
624 | #elif defined( WANT_ACCESSLIST_WHITE ) | 688 | set_config_option(&g_accesslist_filename, optarg); |
625 | case 'w': set_config_option( &g_accesslist_filename, optarg); break; | 689 | break; |
690 | #elif defined(WANT_ACCESSLIST_WHITE) | ||
691 | case 'w': | ||
692 | set_config_option(&g_accesslist_filename, optarg); | ||
693 | break; | ||
626 | #endif | 694 | #endif |
627 | case 'p': | 695 | case 'p': |
628 | if( !scan_ushort( optarg, &tmpport)) { usage( argv[0] ); exit( 1 ); } | 696 | if (!scan_ushort(optarg, &tmpport)) { |
629 | ot_try_bind( serverip, tmpport, FLAG_TCP ); bound++; break; | 697 | usage(argv[0]); |
630 | case 'P': | 698 | exit(1); |
631 | if( !scan_ushort( optarg, &tmpport)) { usage( argv[0] ); exit( 1 ); } | 699 | } |
632 | ot_try_bind( serverip, tmpport, FLAG_UDP ); bound++; break; | 700 | ot_try_bind(serverip, tmpport, FLAG_TCP); |
701 | bound++; | ||
702 | break; | ||
703 | case 'P': | ||
704 | if (!scan_ushort(optarg, &tmpport)) { | ||
705 | usage(argv[0]); | ||
706 | exit(1); | ||
707 | } | ||
708 | ot_try_bind(serverip, tmpport, FLAG_UDP); | ||
709 | bound++; | ||
710 | break; | ||
633 | #ifdef WANT_SYNC_LIVE | 711 | #ifdef WANT_SYNC_LIVE |
634 | case 's': | 712 | case 's': |
635 | if( !scan_ushort( optarg, &tmpport)) { usage( argv[0] ); exit( 1 ); } | 713 | if (!scan_ushort(optarg, &tmpport)) { |
636 | livesync_bind_mcast( serverip, tmpport); break; | 714 | usage(argv[0]); |
715 | exit(1); | ||
716 | } | ||
717 | livesync_bind_mcast(serverip, tmpport); | ||
718 | break; | ||
637 | #endif | 719 | #endif |
638 | case 'd': set_config_option( &g_serverdir, optarg ); break; | 720 | case 'd': |
639 | case 'u': set_config_option( &g_serveruser, optarg ); break; | 721 | set_config_option(&g_serverdir, optarg); |
640 | case 'r': set_config_option( &g_redirecturl, optarg ); break; | 722 | break; |
641 | case 'l': statefile = optarg; break; | 723 | case 'u': |
642 | case 'A': | 724 | set_config_option(&g_serveruser, optarg); |
643 | if( !scan_ip6( optarg, tmpip )) { usage( argv[0] ); exit( 1 ); } | 725 | break; |
644 | accesslist_blessip( tmpip, 0xffff ); /* Allow everything for now */ | 726 | case 'r': |
645 | break; | 727 | set_config_option(&g_redirecturl, optarg); |
646 | case 'f': bound += parse_configfile( optarg ); break; | 728 | break; |
647 | case 'h': help( argv[0] ); exit( 0 ); | 729 | case 'l': |
648 | case 'v': { | 730 | statefile = optarg; |
649 | char buffer[8192]; | 731 | break; |
650 | stats_return_tracker_version( buffer ); | 732 | case 'A': |
651 | fputs( buffer, stderr ); | 733 | if (!scan_ip6_net(optarg, &tmpnet)) { |
652 | exit( 0 ); | 734 | usage(argv[0]); |
735 | exit(1); | ||
653 | } | 736 | } |
654 | default: | 737 | accesslist_bless_net(&tmpnet, 0xffff); /* Allow everything for now */ |
655 | case '?': usage( argv[0] ); exit( 1 ); | 738 | break; |
739 | case 'f': | ||
740 | bound += parse_configfile(optarg); | ||
741 | break; | ||
742 | case 'h': | ||
743 | help(argv[0]); | ||
744 | exit(0); | ||
745 | case 'v': { | ||
746 | char buffer[8192]; | ||
747 | stats_return_tracker_version(buffer); | ||
748 | fputs(buffer, stderr); | ||
749 | exit(0); | ||
750 | } | ||
751 | default: | ||
752 | case '?': | ||
753 | usage(argv[0]); | ||
754 | exit(1); | ||
656 | } | 755 | } |
657 | } | 756 | } |
658 | 757 | ||
659 | /* Bind to our default tcp/udp ports */ | 758 | /* Bind to our default tcp/udp ports */ |
660 | if( !bound) { | 759 | if (!bound) { |
661 | ot_try_bind( serverip, 6969, FLAG_TCP ); | 760 | ot_try_bind(serverip, 6969, FLAG_TCP); |
662 | ot_try_bind( serverip, 6969, FLAG_UDP ); | 761 | ot_try_bind(serverip, 6969, FLAG_UDP); |
663 | } | 762 | } |
664 | 763 | ||
764 | defaul_signal_handlers(); | ||
765 | |||
665 | #ifdef WANT_SYSLOGS | 766 | #ifdef WANT_SYSLOGS |
666 | openlog( "opentracker", 0, LOG_USER ); | 767 | openlog("opentracker", 0, LOG_USER); |
667 | setlogmask(LOG_UPTO(LOG_INFO)); | 768 | setlogmask(LOG_UPTO(LOG_INFO)); |
668 | #endif | 769 | #endif |
669 | 770 | ||
670 | if( drop_privileges( g_serveruser ? g_serveruser : "nobody", g_serverdir ) == -1 ) | 771 | if (drop_privileges(g_serveruser ? g_serveruser : "nobody", g_serverdir) == -1) |
671 | panic( "drop_privileges failed, exiting. Last error"); | 772 | panic("drop_privileges failed, exiting. Last error"); |
672 | 773 | ||
673 | g_now_seconds = time( NULL ); | 774 | g_now_seconds = time(NULL); |
775 | pthread_create(&thread_id, NULL, time_caching_worker, NULL); | ||
674 | 776 | ||
675 | /* Create our self pipe which allows us to interrupt mainloops | 777 | /* Create our self pipe which allows us to interrupt mainloops |
676 | io_wait in case some data is available to send out */ | 778 | io_wait in case some data is available to send out */ |
677 | if( pipe( g_self_pipe ) == -1 ) | 779 | if (pipe(g_self_pipe) == -1) |
678 | panic( "selfpipe failed: " ); | 780 | panic("selfpipe failed: "); |
679 | if( !io_fd( g_self_pipe[0] ) ) | 781 | if (!io_fd(g_self_pipe[0])) |
680 | panic( "selfpipe io_fd failed: " ); | 782 | panic("selfpipe io_fd failed: "); |
681 | if( !io_fd( g_self_pipe[1] ) ) | 783 | if (!io_fd(g_self_pipe[1])) |
682 | panic( "selfpipe io_fd failed: " ); | 784 | panic("selfpipe io_fd failed: "); |
683 | io_setcookie( g_self_pipe[0], (void*)FLAG_SELFPIPE ); | 785 | io_setcookie(g_self_pipe[0], (void *)FLAG_SELFPIPE); |
684 | io_wantread( g_self_pipe[0] ); | 786 | io_wantread(g_self_pipe[0]); |
685 | 787 | ||
686 | defaul_signal_handlers( ); | ||
687 | /* Init all sub systems. This call may fail with an exit() */ | 788 | /* Init all sub systems. This call may fail with an exit() */ |
688 | trackerlogic_init( ); | 789 | trackerlogic_init(); |
689 | 790 | ||
690 | if( statefile ) | 791 | #ifdef _DEBUG_RANDOMTORRENTS |
691 | load_state( statefile ); | 792 | fprintf(stderr, "DEBUG: Generating %d random peers on random torrents. This may take a while. (Setting RANDOMTORRENTS in trackerlogic.h)\n", RANDOMTORRENTS); |
793 | trackerlogic_add_random_torrents(RANDOMTORRENTS); | ||
794 | fprintf(stderr, "... done.\n"); | ||
795 | #endif | ||
692 | 796 | ||
693 | install_signal_handlers( ); | 797 | if (statefile) |
798 | load_state(statefile); | ||
694 | 799 | ||
695 | if( !g_udp_workers ) | 800 | install_signal_handlers(); |
696 | udp_init( -1, 0 ); | ||
697 | 801 | ||
698 | /* Kick off our initial clock setting alarm */ | 802 | if (!g_udp_workers) |
699 | alarm(5); | 803 | udp_init(-1, 0); |
700 | 804 | ||
701 | server_mainloop( 0 ); | 805 | server_mainloop(0); |
702 | 806 | ||
703 | return 0; | 807 | return 0; |
704 | } | 808 | } |
705 | |||
706 | const char *g_version_opentracker_c = "$Source$: $Revision$\n"; | ||
diff --git a/opentracker.conf.sample b/opentracker.conf.sample index db45122..054e405 100644 --- a/opentracker.conf.sample +++ b/opentracker.conf.sample | |||
@@ -2,7 +2,7 @@ | |||
2 | # | 2 | # |
3 | 3 | ||
4 | # I) Address opentracker will listen on, using both, tcp AND udp family | 4 | # I) Address opentracker will listen on, using both, tcp AND udp family |
5 | # (note, that port 6969 is implicite if ommitted). | 5 | # (note, that port 6969 is implicit if omitted). |
6 | # | 6 | # |
7 | # If no listen option is given (here or on the command line), opentracker | 7 | # If no listen option is given (here or on the command line), opentracker |
8 | # listens on 0.0.0.0:6969 tcp and udp. | 8 | # listens on 0.0.0.0:6969 tcp and udp. |
@@ -44,17 +44,65 @@ | |||
44 | # listing, so choose one of those options at compile time. File format | 44 | # listing, so choose one of those options at compile time. File format |
45 | # is straight forward: "<hex info hash>\n<hex info hash>\n..." | 45 | # is straight forward: "<hex info hash>\n<hex info hash>\n..." |
46 | # | 46 | # |
47 | # IIa) You can enable dynamic changesets to accesslists by enabling | ||
48 | # WANT_DYNAMIC_ACCESSLIST. | ||
49 | # | ||
50 | # The suggested way to work with dynamic changeset lists is to keep a | ||
51 | # main accesslist file that is loaded when opentracker (re)starts and | ||
52 | # reloaded infrequently (hourly or daily). | ||
53 | # | ||
54 | # All changes to the accesslist (e.g. from a web frontend) should be | ||
55 | # both appended to or removed from that file and sent to opentracker. By | ||
56 | # keeping dynamic changeset lists, you can avoid reloading huge | ||
57 | # accesslists whenever just a single entry is added or removed. | ||
58 | # | ||
59 | # Any info_hash (format see above) written to the fifo_add file will be | ||
60 | # kept on a dynamic add-changeset, removed from the dynamic | ||
61 | # delete-changeset and treated as if it was in the main accesslist file. | ||
62 | # The semantic of the respective dynamic changeset depends on whether | ||
63 | # WANT_ACCESSLIST_WHITE or WANT_ACCESSLIST_BLACK is enabled. | ||
64 | # | ||
65 | # access.fifo_add /var/run/opentracker/adder.fifo | ||
66 | # | ||
67 | # Any info_hash (format see above) written to the fifo_delete file will | ||
68 | # be kept on a dynamic delete-changeset, removed from the dynamic | ||
69 | # add-changeset and treated as if it was not in the main accesslist | ||
70 | # file. | ||
71 | # | ||
72 | # access.fifo_delete /var/run/opentracker/deleter.fifo | ||
73 | # | ||
74 | # If you reload the accesslist by sending SIGHUP to the tracker process, | ||
75 | # the dynamic lists are flushed, as opentracker assumes those lists are | ||
76 | # merged into the main accesslist. | ||
77 | # | ||
78 | # NOTE: While you can have multiple writers sending lines to the fifos, | ||
79 | # any writes larger than PIPE_BUF (see your limits.h, minimally 512 | ||
80 | # bytes but usually 4096) may be interleaved with data sent by other | ||
81 | # writers. This can lead to unparsable lines of info_hashes. | ||
82 | # | ||
83 | # IIb) | ||
47 | # If you do not want to grant anyone access to your stats, enable the | 84 | # If you do not want to grant anyone access to your stats, enable the |
48 | # WANT_RESTRICT_STATS option in Makefile and bless the ip addresses | 85 | # WANT_RESTRICT_STATS option in Makefile and bless the ip addresses |
49 | # allowed to fetch stats here. | 86 | # or network allowed to fetch stats here. |
50 | # | 87 | # |
51 | # access.stats 192.168.0.23 | 88 | # access.stats 192.168.0.23 |
89 | # access.stats 10.1.1.23 | ||
52 | # | 90 | # |
53 | # There is another way of hiding your stats. You can obfuscate the path | 91 | # There is another way of hiding your stats. You can obfuscate the path |
54 | # to them. Normally it is located at /stats but you can configure it to | 92 | # to them. Normally it is located at /stats but you can configure it to |
55 | # appear anywhere on your tracker. | 93 | # appear anywhere on your tracker. |
56 | # | 94 | # |
57 | # access.stats_path stats | 95 | # access.stats_path stats |
96 | # | ||
97 | # IIc) | ||
98 | # If opentracker lives behind one or multiple reverse proxies, | ||
99 | # every http connection appears to come from these proxies. In order to | ||
100 | # take the X-Forwarded-For address instead, compile opentracker with the | ||
101 | # WANT_IP_FROM_PROXY option and set your proxy addresses or networks here. | ||
102 | # | ||
103 | # access.proxy 10.0.1.23 | ||
104 | # access.proxy 192.0.0.0/8 | ||
105 | # | ||
58 | 106 | ||
59 | # III) Live sync uses udp multicast packets to keep a cluster of opentrackers | 107 | # III) Live sync uses udp multicast packets to keep a cluster of opentrackers |
60 | # synchronized. This option tells opentracker which port to listen for | 108 | # synchronized. This option tells opentracker which port to listen for |
diff --git a/ot_accesslist.c b/ot_accesslist.c index a3a2049..4b88c40 100644 --- a/ot_accesslist.c +++ b/ot_accesslist.c | |||
@@ -5,121 +5,201 @@ | |||
5 | 5 | ||
6 | /* System */ | 6 | /* System */ |
7 | #include <pthread.h> | 7 | #include <pthread.h> |
8 | #include <signal.h> | ||
9 | #include <stdio.h> | ||
8 | #include <stdlib.h> | 10 | #include <stdlib.h> |
9 | #include <string.h> | 11 | #include <string.h> |
10 | #include <stdio.h> | ||
11 | #include <signal.h> | ||
12 | #include <unistd.h> | 12 | #include <unistd.h> |
13 | #ifdef WANT_DYNAMIC_ACCESSLIST | ||
14 | #include <errno.h> | ||
15 | #include <sys/stat.h> | ||
16 | #include <sys/types.h> | ||
17 | #endif | ||
13 | 18 | ||
14 | /* Libowfat */ | 19 | /* Libowfat */ |
15 | #include "byte.h" | 20 | #include "byte.h" |
16 | #include "scan.h" | 21 | #include "fmt.h" |
17 | #include "ip6.h" | 22 | #include "ip6.h" |
18 | #include "mmap.h" | 23 | #include "mmap.h" |
24 | #include "scan.h" | ||
19 | 25 | ||
20 | /* Opentracker */ | 26 | /* Opentracker */ |
21 | #include "trackerlogic.h" | ||
22 | #include "ot_accesslist.h" | 27 | #include "ot_accesslist.h" |
23 | #include "ot_vector.h" | 28 | #include "ot_vector.h" |
29 | #include "trackerlogic.h" | ||
24 | 30 | ||
25 | /* GLOBAL VARIABLES */ | 31 | /* GLOBAL VARIABLES */ |
26 | #ifdef WANT_ACCESSLIST | 32 | #ifdef WANT_ACCESSLIST |
27 | char *g_accesslist_filename; | 33 | char *g_accesslist_filename = NULL; |
34 | #ifdef WANT_DYNAMIC_ACCESSLIST | ||
35 | char *g_accesslist_pipe_add = NULL; | ||
36 | char *g_accesslist_pipe_delete = NULL; | ||
37 | #endif | ||
28 | static pthread_mutex_t g_accesslist_mutex; | 38 | static pthread_mutex_t g_accesslist_mutex; |
29 | 39 | ||
30 | typedef struct { | 40 | /* Accesslists are lock free linked lists. We can not make them locking, because every announce |
31 | ot_hash *list; | 41 | would try to acquire the mutex, making it the most contested mutex in the whole of opentracker, |
32 | size_t size; | 42 | basically creating a central performance choke point. |
33 | } ot_accesslist; | 43 | |
34 | ot_accesslist * g_accesslist = NULL; | 44 | The idea is that updating the list heads happens under the g_accesslist_mutex guard and is |
35 | ot_accesslist * g_accesslist_old = NULL; | 45 | done atomically, while consumers might potentially still hold pointers deeper inside the list. |
36 | 46 | ||
37 | static int vector_compare_hash(const void *hash1, const void *hash2 ) { | 47 | Consumers (for now only via accesslist_hashisvalid) will always fetch the list head pointer |
38 | return memcmp( hash1, hash2, OT_HASH_COMPARE_SIZE ); | 48 | that is guaranteed to live for at least five minutes. This should be many orders of magnitude |
49 | more than how long it will be needed by the bsearch done on the list. */ | ||
50 | struct ot_accesslist; | ||
51 | typedef struct ot_accesslist ot_accesslist; | ||
52 | struct ot_accesslist { | ||
53 | ot_hash *list; | ||
54 | size_t size; | ||
55 | ot_time base; | ||
56 | ot_accesslist *next; | ||
57 | }; | ||
58 | static ot_accesslist *_Atomic g_accesslist = NULL; | ||
59 | #ifdef WANT_DYNAMIC_ACCESSLIST | ||
60 | static ot_accesslist *_Atomic g_accesslist_add = NULL; | ||
61 | static ot_accesslist *_Atomic g_accesslist_delete = NULL; | ||
62 | #endif | ||
63 | |||
64 | /* Helpers to work on access lists */ | ||
65 | static int vector_compare_hash(const void *hash1, const void *hash2) { return memcmp(hash1, hash2, OT_HASH_COMPARE_SIZE); } | ||
66 | |||
67 | static ot_accesslist *accesslist_free(ot_accesslist *accesslist) { | ||
68 | while (accesslist) { | ||
69 | ot_accesslist *this_accesslist = accesslist; | ||
70 | accesslist = this_accesslist->next; | ||
71 | free(this_accesslist->list); | ||
72 | free(this_accesslist); | ||
73 | } | ||
74 | return NULL; | ||
75 | } | ||
76 | |||
77 | static ot_accesslist *accesslist_make(ot_accesslist *next, size_t size) { | ||
78 | ot_accesslist *accesslist_new = malloc(sizeof(ot_accesslist)); | ||
79 | if (accesslist_new) { | ||
80 | accesslist_new->list = size ? malloc(sizeof(ot_hash) * size) : NULL; | ||
81 | accesslist_new->size = size; | ||
82 | accesslist_new->base = g_now_minutes; | ||
83 | accesslist_new->next = next; | ||
84 | if (size && !accesslist_new->list) { | ||
85 | free(accesslist_new); | ||
86 | accesslist_new = NULL; | ||
87 | } | ||
88 | } | ||
89 | return accesslist_new; | ||
90 | } | ||
91 | |||
92 | /* This must be called with g_accesslist_mutex held. | ||
93 | This will never delete head, because that might still be in use. */ | ||
94 | static void accesslist_clean(ot_accesslist *accesslist) { | ||
95 | while (accesslist && accesslist->next) { | ||
96 | if (accesslist->next->base + 5 < g_now_minutes) | ||
97 | accesslist->next = accesslist_free(accesslist->next); | ||
98 | accesslist = accesslist->next; | ||
99 | } | ||
39 | } | 100 | } |
40 | 101 | ||
41 | /* Read initial access list */ | 102 | /* Read initial access list */ |
42 | static void accesslist_readfile( void ) { | 103 | static void accesslist_readfile(void) { |
43 | ot_accesslist * accesslist_new = malloc(sizeof(ot_accesslist)); | 104 | ot_accesslist *accesslist_new; |
44 | ot_hash *info_hash; | 105 | ot_hash *info_hash; |
45 | const char *map, *map_end, *read_offs; | 106 | const char *map, *map_end, *read_offs; |
46 | size_t maplen; | 107 | size_t maplen; |
47 | 108 | ||
48 | if( ( map = mmap_read( g_accesslist_filename, &maplen ) ) == NULL ) { | 109 | if ((map = mmap_read(g_accesslist_filename, &maplen)) == NULL) { |
49 | char *wd = getcwd( NULL, 0 ); | 110 | char *wd = getcwd(NULL, 0); |
50 | fprintf( stderr, "Warning: Can't open accesslist file: %s (but will try to create it later, if necessary and possible).\nPWD: %s\n", g_accesslist_filename, wd ); | 111 | fprintf(stderr, "Warning: Can't open accesslist file: %s (but will try to create it later, if necessary and possible).\nPWD: %s\n", g_accesslist_filename, wd); |
51 | free( wd ); | 112 | free(wd); |
52 | return; | 113 | return; |
53 | } | 114 | } |
54 | 115 | ||
55 | /* You need at least 41 bytes to pass an info_hash, make enough room | 116 | /* You need at least 41 bytes to pass an info_hash, make enough room |
56 | for the maximum amount of them */ | 117 | for the maximum amount of them */ |
57 | accesslist_new->size = 0; | 118 | accesslist_new = accesslist_make(g_accesslist, maplen / 41); |
58 | info_hash = accesslist_new->list = malloc( ( maplen / 41 ) * 20 ); | 119 | if (!accesslist_new) { |
59 | if( !accesslist_new->list ) { | 120 | fprintf(stderr, "Warning: Not enough memory to allocate %zd bytes for accesslist buffer. May succeed later.\n", (maplen / 41) * 20); |
60 | fprintf( stderr, "Warning: Not enough memory to allocate %zd bytes for accesslist buffer. May succeed later.\n", ( maplen / 41 ) * 20 ); | 121 | mmap_unmap(map, maplen); |
61 | mmap_unmap( map, maplen); | ||
62 | free(accesslist_new); | ||
63 | return; | 122 | return; |
64 | } | 123 | } |
124 | info_hash = accesslist_new->list; | ||
65 | 125 | ||
66 | /* No use to scan if there's not enough room for another full info_hash */ | 126 | /* No use to scan if there's not enough room for another full info_hash */ |
67 | map_end = map + maplen - 40; | 127 | map_end = map + maplen - 40; |
68 | read_offs = map; | 128 | read_offs = map; |
69 | 129 | ||
70 | /* We do ignore anything that is not of the form "^[[:xdigit:]]{40}[^[:xdigit:]].*" */ | 130 | /* We do ignore anything that is not of the form "^[[:xdigit:]]{40}[^[:xdigit:]].*" */ |
71 | while( read_offs <= map_end ) { | 131 | while (read_offs <= map_end) { |
72 | int i; | 132 | int i; |
73 | for( i=0; i<(int)sizeof(ot_hash); ++i ) { | 133 | for (i = 0; i < (int)sizeof(ot_hash); ++i) { |
74 | int eger1 = scan_fromhex( read_offs[ 2*i ] ); | 134 | int eger1 = scan_fromhex((unsigned char)read_offs[2 * i]); |
75 | int eger2 = scan_fromhex( read_offs[ 1 + 2*i ] ); | 135 | int eger2 = scan_fromhex((unsigned char)read_offs[1 + 2 * i]); |
76 | if( eger1 < 0 || eger2 < 0 ) | 136 | if (eger1 < 0 || eger2 < 0) |
77 | break; | 137 | break; |
78 | (*info_hash)[i] = eger1 * 16 + eger2; | 138 | (*info_hash)[i] = (uint8_t)(eger1 * 16 + eger2); |
79 | } | 139 | } |
80 | 140 | ||
81 | if( i == sizeof(ot_hash) ) { | 141 | if (i == sizeof(ot_hash)) { |
82 | read_offs += 40; | 142 | read_offs += 40; |
83 | 143 | ||
84 | /* Append accesslist to accesslist vector */ | 144 | /* Append accesslist to accesslist vector */ |
85 | if( read_offs == map_end || scan_fromhex( *read_offs ) < 0 ) | 145 | if (read_offs == map_end || scan_fromhex((unsigned char)*read_offs) < 0) |
86 | ++info_hash; | 146 | ++info_hash; |
87 | } | 147 | } |
88 | 148 | ||
89 | /* Find start of next line */ | 149 | /* Find start of next line */ |
90 | while( read_offs <= map_end && *(read_offs++) != '\n' ); | 150 | while (read_offs <= map_end && *(read_offs++) != '\n') |
151 | ; | ||
91 | } | 152 | } |
92 | #ifdef _DEBUG | 153 | #ifdef _DEBUG |
93 | fprintf( stderr, "Added %zd info_hashes to accesslist\n", (size_t)(info_hash - accesslist_new->list) ); | 154 | fprintf(stderr, "Added %zd info_hashes to accesslist\n", (size_t)(info_hash - accesslist_new->list)); |
94 | #endif | 155 | #endif |
95 | 156 | ||
96 | mmap_unmap( map, maplen); | 157 | mmap_unmap(map, maplen); |
97 | 158 | ||
98 | qsort( accesslist_new->list, info_hash - accesslist_new->list, sizeof( *info_hash ), vector_compare_hash ); | 159 | qsort(accesslist_new->list, info_hash - accesslist_new->list, sizeof(*info_hash), vector_compare_hash); |
99 | accesslist_new->size = info_hash - accesslist_new->list; | 160 | accesslist_new->size = info_hash - accesslist_new->list; |
100 | 161 | ||
101 | /* Now exchange the accesslist vector in the least race condition prone way */ | 162 | /* Now exchange the accesslist vector in the least race condition prone way */ |
102 | pthread_mutex_lock(&g_accesslist_mutex); | 163 | pthread_mutex_lock(&g_accesslist_mutex); |
164 | accesslist_new->next = g_accesslist; | ||
165 | g_accesslist = accesslist_new; /* Only now set a new list */ | ||
166 | |||
167 | #ifdef WANT_DYNAMIC_ACCESSLIST | ||
168 | /* If we have dynamic accesslists, reloading a new one will always void the add/delete lists. | ||
169 | Insert empty ones at the list head */ | ||
170 | if (g_accesslist_add && (accesslist_new = accesslist_make(g_accesslist_add, 0)) != NULL) | ||
171 | g_accesslist_add = accesslist_new; | ||
172 | if (g_accesslist_delete && (accesslist_new = accesslist_make(g_accesslist_delete, 0)) != NULL) | ||
173 | g_accesslist_delete = accesslist_new; | ||
174 | #endif | ||
103 | 175 | ||
104 | if (g_accesslist_old) { | 176 | accesslist_clean(g_accesslist); |
105 | free(g_accesslist_old->list); | ||
106 | free(g_accesslist_old); | ||
107 | } | ||
108 | |||
109 | g_accesslist_old = g_accesslist; /* Keep a copy for later free */ | ||
110 | g_accesslist = accesslist_new; /* Only now set a new list */ | ||
111 | 177 | ||
112 | pthread_mutex_unlock(&g_accesslist_mutex); | 178 | pthread_mutex_unlock(&g_accesslist_mutex); |
113 | } | 179 | } |
114 | 180 | ||
115 | int accesslist_hashisvalid( ot_hash hash ) { | 181 | int accesslist_hashisvalid(ot_hash hash) { |
116 | /* Get working copy of current access list */ | 182 | /* Get working copy of current access list */ |
117 | ot_accesslist * accesslist = g_accesslist; | 183 | ot_accesslist *accesslist = g_accesslist; |
118 | 184 | #ifdef WANT_DYNAMIC_ACCESSLIST | |
119 | void * exactmatch = NULL; | 185 | ot_accesslist *accesslist_add, *accesslist_delete; |
186 | #endif | ||
187 | void *exactmatch = NULL; | ||
120 | 188 | ||
121 | if (accesslist) | 189 | if (accesslist) |
122 | exactmatch = bsearch( hash, accesslist->list, accesslist->size, OT_HASH_COMPARE_SIZE, vector_compare_hash ); | 190 | exactmatch = bsearch(hash, accesslist->list, accesslist->size, OT_HASH_COMPARE_SIZE, vector_compare_hash); |
191 | |||
192 | #ifdef WANT_DYNAMIC_ACCESSLIST | ||
193 | /* If we had no match on the main list, scan the list of dynamically added hashes */ | ||
194 | accesslist_add = g_accesslist_add; | ||
195 | if ((exactmatch == NULL) && accesslist_add) | ||
196 | exactmatch = bsearch(hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash); | ||
197 | |||
198 | /* If we found a matching hash on the main list, scan the list of dynamically deleted hashes */ | ||
199 | accesslist_delete = g_accesslist_delete; | ||
200 | if ((exactmatch != NULL) && accesslist_delete && bsearch(hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash)) | ||
201 | exactmatch = NULL; | ||
202 | #endif | ||
123 | 203 | ||
124 | #ifdef WANT_ACCESSLIST_BLACK | 204 | #ifdef WANT_ACCESSLIST_BLACK |
125 | return exactmatch == NULL; | 205 | return exactmatch == NULL; |
@@ -128,79 +208,210 @@ int accesslist_hashisvalid( ot_hash hash ) { | |||
128 | #endif | 208 | #endif |
129 | } | 209 | } |
130 | 210 | ||
131 | static void * accesslist_worker( void * args ) { | 211 | static void *accesslist_worker(void *args) { |
132 | int sig; | 212 | int sig; |
133 | sigset_t signal_mask; | 213 | sigset_t signal_mask; |
134 | 214 | ||
135 | sigemptyset(&signal_mask); | 215 | sigemptyset(&signal_mask); |
136 | sigaddset(&signal_mask, SIGHUP); | 216 | sigaddset(&signal_mask, SIGHUP); |
137 | 217 | ||
138 | (void)args; | 218 | (void)args; |
139 | 219 | ||
140 | while( 1 ) { | 220 | while (1) { |
221 | if (!g_opentracker_running) | ||
222 | return NULL; | ||
141 | 223 | ||
142 | /* Initial attempt to read accesslist */ | 224 | /* Initial attempt to read accesslist */ |
143 | accesslist_readfile( ); | 225 | accesslist_readfile(); |
144 | 226 | ||
145 | /* Wait for signals */ | 227 | /* Wait for signals */ |
146 | while( sigwait (&signal_mask, &sig) != 0 && sig != SIGHUP ); | 228 | while (sigwait(&signal_mask, &sig) != 0 && sig != SIGHUP) |
229 | ; | ||
230 | } | ||
231 | return NULL; | ||
232 | } | ||
233 | |||
234 | #ifdef WANT_DYNAMIC_ACCESSLIST | ||
235 | static pthread_t thread_adder_id, thread_deleter_id; | ||
236 | static void *accesslist_adddel_worker(char *fifoname, ot_accesslist *_Atomic *adding_to, ot_accesslist *_Atomic *removing_from) { | ||
237 | struct stat st; | ||
238 | |||
239 | if (!stat(fifoname, &st)) { | ||
240 | if (!S_ISFIFO(st.st_mode)) { | ||
241 | fprintf(stderr, "Error when starting dynamic accesslists: Found Non-FIFO file at %s.\nPlease remove it and restart opentracker.\n", fifoname); | ||
242 | return NULL; | ||
243 | } | ||
244 | } else { | ||
245 | int error = mkfifo(fifoname, 0755); | ||
246 | if (error && error != EEXIST) { | ||
247 | fprintf(stderr, "Error when starting dynamic accesslists: Couldn't create FIFO at %s, error: %s\n", fifoname, strerror(errno)); | ||
248 | return NULL; | ||
249 | } | ||
250 | } | ||
251 | |||
252 | while (g_opentracker_running) { | ||
253 | FILE *fifo = fopen(fifoname, "r"); | ||
254 | char *line = NULL; | ||
255 | size_t linecap = 0; | ||
256 | ssize_t linelen; | ||
257 | |||
258 | if (!fifo) { | ||
259 | fprintf(stderr, "Error when reading dynamic accesslists: Couldn't open FIFO at %s, error: %s\n", fifoname, strerror(errno)); | ||
260 | return NULL; | ||
261 | } | ||
262 | |||
263 | while ((linelen = getline(&line, &linecap, fifo)) > 0) { | ||
264 | ot_hash info_hash; | ||
265 | int i; | ||
266 | |||
267 | printf("Got line %*s", (int)linelen, line); | ||
268 | /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*" | ||
269 | If there's not enough characters for an info_hash in the line, skip it. */ | ||
270 | if (linelen < 41) | ||
271 | continue; | ||
272 | |||
273 | for (i = 0; i < (int)sizeof(ot_hash); ++i) { | ||
274 | int eger1 = scan_fromhex((unsigned char)line[2 * i]); | ||
275 | int eger2 = scan_fromhex((unsigned char)line[1 + 2 * i]); | ||
276 | if (eger1 < 0 || eger2 < 0) | ||
277 | break; | ||
278 | ((uint8_t *)info_hash)[i] = (uint8_t)(eger1 * 16 + eger2); | ||
279 | } | ||
280 | printf("parsed info_hash %20s\n", info_hash); | ||
281 | if (i != sizeof(ot_hash)) | ||
282 | continue; | ||
283 | |||
284 | /* From now on we modify g_accesslist_add and g_accesslist_delete, so prevent the | ||
285 | other worker threads from doing the same */ | ||
286 | pthread_mutex_lock(&g_accesslist_mutex); | ||
287 | |||
288 | /* If the info hash is in the removing_from list, create a new head without that entry */ | ||
289 | if (*removing_from && (*removing_from)->list) { | ||
290 | ot_hash *exactmatch = bsearch(info_hash, (*removing_from)->list, (*removing_from)->size, OT_HASH_COMPARE_SIZE, vector_compare_hash); | ||
291 | if (exactmatch) { | ||
292 | ptrdiff_t off = exactmatch - (*removing_from)->list; | ||
293 | ot_accesslist *accesslist_new = accesslist_make(*removing_from, (*removing_from)->size - 1); | ||
294 | if (accesslist_new) { | ||
295 | memcpy(accesslist_new->list, (*removing_from)->list, sizeof(ot_hash) * off); | ||
296 | memcpy(accesslist_new->list + off, (*removing_from)->list + off + 1, (*removing_from)->size - off - 1); | ||
297 | *removing_from = accesslist_new; | ||
298 | } | ||
299 | } | ||
300 | } | ||
301 | |||
302 | /* Simple case: there's no adding_to list yet, create one with one member */ | ||
303 | if (!*adding_to) { | ||
304 | ot_accesslist *accesslist_new = accesslist_make(NULL, 1); | ||
305 | if (accesslist_new) { | ||
306 | memcpy(accesslist_new->list, info_hash, sizeof(ot_hash)); | ||
307 | *adding_to = accesslist_new; | ||
308 | } | ||
309 | } else { | ||
310 | int exactmatch = 0; | ||
311 | ot_hash *insert_point = binary_search(info_hash, (*adding_to)->list, (*adding_to)->size, OT_HASH_COMPARE_SIZE, sizeof(ot_hash), &exactmatch); | ||
312 | |||
313 | /* Only if the info hash is not in the adding_to list, create a new head with that entry */ | ||
314 | if (!exactmatch) { | ||
315 | ot_accesslist *accesslist_new = accesslist_make(*adding_to, (*adding_to)->size + 1); | ||
316 | ptrdiff_t off = insert_point - (*adding_to)->list; | ||
317 | if (accesslist_new) { | ||
318 | memcpy(accesslist_new->list, (*adding_to)->list, sizeof(ot_hash) * off); | ||
319 | memcpy(accesslist_new->list + off, info_hash, sizeof(info_hash)); | ||
320 | memcpy(accesslist_new->list + off + 1, (*adding_to)->list + off, (*adding_to)->size - off); | ||
321 | *adding_to = accesslist_new; | ||
322 | } | ||
323 | } | ||
324 | } | ||
325 | |||
326 | pthread_mutex_unlock(&g_accesslist_mutex); | ||
327 | } | ||
328 | |||
329 | fclose(fifo); | ||
147 | } | 330 | } |
148 | return NULL; | 331 | return NULL; |
149 | } | 332 | } |
150 | 333 | ||
334 | static void *accesslist_adder_worker(void *args) { | ||
335 | (void)args; | ||
336 | return accesslist_adddel_worker(g_accesslist_pipe_add, &g_accesslist_add, &g_accesslist_delete); | ||
337 | } | ||
338 | static void *accesslist_deleter_worker(void *args) { | ||
339 | (void)args; | ||
340 | return accesslist_adddel_worker(g_accesslist_pipe_delete, &g_accesslist_delete, &g_accesslist_add); | ||
341 | } | ||
342 | #endif | ||
343 | |||
151 | static pthread_t thread_id; | 344 | static pthread_t thread_id; |
152 | void accesslist_init( ) { | 345 | void accesslist_init() { |
153 | pthread_mutex_init(&g_accesslist_mutex, NULL); | 346 | pthread_mutex_init(&g_accesslist_mutex, NULL); |
154 | pthread_create( &thread_id, NULL, accesslist_worker, NULL ); | 347 | pthread_create(&thread_id, NULL, accesslist_worker, NULL); |
348 | #ifdef WANT_DYNAMIC_ACCESSLIST | ||
349 | if (g_accesslist_pipe_add) | ||
350 | pthread_create(&thread_adder_id, NULL, accesslist_adder_worker, NULL); | ||
351 | if (g_accesslist_pipe_delete) | ||
352 | pthread_create(&thread_deleter_id, NULL, accesslist_deleter_worker, NULL); | ||
353 | #endif | ||
155 | } | 354 | } |
156 | 355 | ||
157 | void accesslist_deinit( void ) { | 356 | void accesslist_deinit(void) { |
158 | pthread_cancel( thread_id ); | 357 | /* Wake up sleeping worker */ |
358 | pthread_kill(thread_id, SIGHUP); | ||
359 | |||
360 | pthread_mutex_lock(&g_accesslist_mutex); | ||
361 | |||
362 | g_accesslist = accesslist_free(g_accesslist); | ||
363 | |||
364 | #ifdef WANT_DYNAMIC_ACCESSLIST | ||
365 | g_accesslist_add = accesslist_free(g_accesslist_add); | ||
366 | g_accesslist_delete = accesslist_free(g_accesslist_delete); | ||
367 | #endif | ||
368 | |||
369 | pthread_mutex_unlock(&g_accesslist_mutex); | ||
370 | pthread_cancel(thread_id); | ||
159 | pthread_mutex_destroy(&g_accesslist_mutex); | 371 | pthread_mutex_destroy(&g_accesslist_mutex); |
372 | } | ||
160 | 373 | ||
161 | if (g_accesslist_old) { | 374 | void accesslist_cleanup(void) { |
162 | free(g_accesslist_old->list); | 375 | pthread_mutex_lock(&g_accesslist_mutex); |
163 | free(g_accesslist_old); | ||
164 | g_accesslist_old = 0; | ||
165 | } | ||
166 | 376 | ||
167 | if (g_accesslist) { | 377 | accesslist_clean(g_accesslist); |
168 | free(g_accesslist->list); | 378 | #if WANT_DYNAMIC_ACCESSLIST |
169 | free(g_accesslist); | 379 | accesslist_clean(g_accesslist_add); |
170 | g_accesslist = 0; | 380 | accesslist_clean(g_accesslist_delete); |
171 | } | 381 | #endif |
382 | |||
383 | pthread_mutex_unlock(&g_accesslist_mutex); | ||
172 | } | 384 | } |
173 | #endif | 385 | #endif |
174 | 386 | ||
175 | int address_in_net( const ot_ip6 address, const ot_net *net ) { | 387 | int address_in_net(const ot_ip6 address, const ot_net *net) { |
176 | int bits = net->bits; | 388 | int bits = net->bits, checkbits = (0x7f00 >> (bits & 7)); |
177 | int result = memcmp( address, &net->address, bits >> 3 ); | 389 | int result = memcmp(address, &net->address, bits >> 3); |
178 | if( !result && ( bits & 7 ) ) | 390 | if (!result && (bits & 7)) |
179 | result = ( ( 0x7f00 >> ( bits & 7 ) ) & address[bits>>3] ) - net->address[bits>>3]; | 391 | result = (checkbits & address[bits >> 3]) - (checkbits & net->address[bits >> 3]); |
180 | return result == 0; | 392 | return result == 0; |
181 | } | 393 | } |
182 | 394 | ||
183 | void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value, const size_t member_size ) { | 395 | void *set_value_for_net(const ot_net *net, ot_vector *vector, const void *value, const size_t member_size) { |
184 | size_t i; | 396 | size_t i; |
185 | int exactmatch; | 397 | int exactmatch; |
186 | 398 | ||
187 | /* Caller must have a concept of ot_net in it's member */ | 399 | /* Caller must have a concept of ot_net in it's member */ |
188 | if( member_size < sizeof(ot_net) ) | 400 | if (member_size < sizeof(ot_net)) |
189 | return 0; | 401 | return 0; |
190 | 402 | ||
191 | /* Check each net in vector for overlap */ | 403 | /* Check each net in vector for overlap */ |
192 | uint8_t *member = ((uint8_t*)vector->data); | 404 | uint8_t *member = ((uint8_t *)vector->data); |
193 | for( i=0; i<vector->size; ++i ) { | 405 | for (i = 0; i < vector->size; ++i) { |
194 | if( address_in_net( *(ot_ip6*)member, net ) || | 406 | if (address_in_net(*(ot_ip6 *)member, net) || address_in_net(net->address, (ot_net *)member)) |
195 | address_in_net( net->address, (ot_net*)member ) ) | ||
196 | return 0; | 407 | return 0; |
197 | member += member_size; | 408 | member += member_size; |
198 | } | 409 | } |
199 | 410 | ||
200 | member = vector_find_or_insert( vector, (void*)net, member_size, sizeof(ot_net), &exactmatch ); | 411 | member = vector_find_or_insert(vector, (void *)net, member_size, sizeof(ot_net), &exactmatch); |
201 | if( member ) { | 412 | if (member) { |
202 | memcpy( member, net, sizeof(ot_net)); | 413 | memcpy(member, net, sizeof(ot_net)); |
203 | memcpy( member + sizeof(ot_net), value, member_size - sizeof(ot_net)); | 414 | memcpy(member + sizeof(ot_net), value, member_size - sizeof(ot_net)); |
204 | } | 415 | } |
205 | 416 | ||
206 | return member; | 417 | return member; |
@@ -208,43 +419,43 @@ void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value | |||
208 | 419 | ||
209 | /* Takes a vector filled with { ot_net net, uint8_t[x] value }; | 420 | /* Takes a vector filled with { ot_net net, uint8_t[x] value }; |
210 | Returns value associated with the net, or NULL if not found */ | 421 | Returns value associated with the net, or NULL if not found */ |
211 | void *get_value_for_net( const ot_ip6 address, const ot_vector *vector, const size_t member_size ) { | 422 | void *get_value_for_net(const ot_ip6 address, const ot_vector *vector, const size_t member_size) { |
212 | int exactmatch; | 423 | int exactmatch; |
213 | /* This binary search will return a pointer to the first non-containing network... */ | 424 | /* This binary search will return a pointer to the first non-containing network... */ |
214 | ot_net *net = binary_search( address, vector->data, vector->size, member_size, sizeof(ot_ip6), &exactmatch ); | 425 | ot_net *net = binary_search(address, vector->data, vector->size, member_size, sizeof(ot_ip6), &exactmatch); |
215 | if( !net ) | 426 | if (!net) |
216 | return NULL; | 427 | return NULL; |
217 | /* ... so we'll need to move back one step unless we've exactly hit the first address in network */ | 428 | /* ... so we'll need to move back one step unless we've exactly hit the first address in network */ |
218 | if( !exactmatch && ( (void*)net > vector->data ) ) | 429 | if (!exactmatch && ((void *)net > vector->data)) |
219 | --net; | 430 | --net; |
220 | if( !address_in_net( address, net ) ) | 431 | if (!address_in_net(address, net)) |
221 | return NULL; | 432 | return NULL; |
222 | return (void*)net; | 433 | return (void *)net; |
223 | } | 434 | } |
224 | 435 | ||
225 | #ifdef WANT_FULLLOG_NETWORKS | 436 | #ifdef WANT_FULLLOG_NETWORKS |
226 | static ot_vector g_lognets_list; | 437 | static ot_vector g_lognets_list; |
227 | ot_log *g_logchain_first, *g_logchain_last; | 438 | ot_log *g_logchain_first, *g_logchain_last; |
228 | |||
229 | static pthread_mutex_t g_lognets_list_mutex = PTHREAD_MUTEX_INITIALIZER; | 439 | static pthread_mutex_t g_lognets_list_mutex = PTHREAD_MUTEX_INITIALIZER; |
230 | void loglist_add_network( const ot_net *net ) { | 440 | |
441 | void loglist_add_network(const ot_net *net) { | ||
231 | pthread_mutex_lock(&g_lognets_list_mutex); | 442 | pthread_mutex_lock(&g_lognets_list_mutex); |
232 | set_value_for_net( net, &g_lognets_list, NULL, sizeof(ot_net)); | 443 | set_value_for_net(net, &g_lognets_list, NULL, sizeof(ot_net)); |
233 | pthread_mutex_unlock(&g_lognets_list_mutex); | 444 | pthread_mutex_unlock(&g_lognets_list_mutex); |
234 | } | 445 | } |
235 | 446 | ||
236 | void loglist_reset( ) { | 447 | void loglist_reset() { |
237 | pthread_mutex_lock(&g_lognets_list_mutex); | 448 | pthread_mutex_lock(&g_lognets_list_mutex); |
238 | free( g_lognets_list.data ); | 449 | free(g_lognets_list.data); |
239 | g_lognets_list.data = 0; | 450 | g_lognets_list.data = 0; |
240 | g_lognets_list.size = g_lognets_list.space = 0; | 451 | g_lognets_list.size = g_lognets_list.space = 0; |
241 | pthread_mutex_unlock(&g_lognets_list_mutex); | 452 | pthread_mutex_unlock(&g_lognets_list_mutex); |
242 | } | 453 | } |
243 | 454 | ||
244 | int loglist_check_address( const ot_ip6 address ) { | 455 | int loglist_check_address(const ot_ip6 address) { |
245 | int result; | 456 | int result; |
246 | pthread_mutex_lock(&g_lognets_list_mutex); | 457 | pthread_mutex_lock(&g_lognets_list_mutex); |
247 | result = ( NULL != get_value_for_net( address, &g_lognets_list, sizeof(ot_net)) ); | 458 | result = (NULL != get_value_for_net(address, &g_lognets_list, sizeof(ot_net))); |
248 | pthread_mutex_unlock(&g_lognets_list_mutex); | 459 | pthread_mutex_unlock(&g_lognets_list_mutex); |
249 | return result; | 460 | return result; |
250 | } | 461 | } |
@@ -252,44 +463,44 @@ int loglist_check_address( const ot_ip6 address ) { | |||
252 | 463 | ||
253 | #ifdef WANT_IP_FROM_PROXY | 464 | #ifdef WANT_IP_FROM_PROXY |
254 | typedef struct { | 465 | typedef struct { |
255 | ot_net *proxy; | 466 | ot_net *proxy; |
256 | ot_vector networks; | 467 | ot_vector networks; |
257 | } ot_proxymap; | 468 | } ot_proxymap; |
258 | 469 | ||
259 | static ot_vector g_proxies_list; | 470 | static ot_vector g_proxies_list; |
260 | static pthread_mutex_t g_proxies_list_mutex = PTHREAD_MUTEX_INITIALIZER; | 471 | static pthread_mutex_t g_proxies_list_mutex = PTHREAD_MUTEX_INITIALIZER; |
261 | 472 | ||
262 | int proxylist_add_network( const ot_net *proxy, const ot_net *net ) { | 473 | int proxylist_add_network(const ot_net *proxy, const ot_net *net) { |
263 | ot_proxymap *map; | 474 | ot_proxymap *map; |
264 | int exactmatch, result = 1; | 475 | int exactmatch, result = 1; |
265 | pthread_mutex_lock(&g_proxies_list_mutex); | 476 | pthread_mutex_lock(&g_proxies_list_mutex); |
266 | 477 | ||
267 | /* If we have a direct hit, use and extend the vector there */ | 478 | /* If we have a direct hit, use and extend the vector there */ |
268 | map = binary_search( proxy, g_proxies_list.data, g_proxies_list.size, sizeof(ot_proxymap), sizeof(ot_net), &exactmatch ); | 479 | map = binary_search(proxy, g_proxies_list.data, g_proxies_list.size, sizeof(ot_proxymap), sizeof(ot_net), &exactmatch); |
269 | 480 | ||
270 | if( !map || !exactmatch ) { | 481 | if (!map || !exactmatch) { |
271 | /* else see, if we've got overlapping networks | 482 | /* else see, if we've got overlapping networks |
272 | and get a new empty vector if not */ | 483 | and get a new empty vector if not */ |
273 | ot_vector empty; | 484 | ot_vector empty; |
274 | memset( &empty, 0, sizeof( ot_vector ) ); | 485 | memset(&empty, 0, sizeof(ot_vector)); |
275 | map = set_value_for_net( proxy, &g_proxies_list, &empty, sizeof(ot_proxymap)); | 486 | map = set_value_for_net(proxy, &g_proxies_list, &empty, sizeof(ot_proxymap)); |
276 | } | 487 | } |
277 | 488 | ||
278 | if( map && set_value_for_net( net, &map->networks, NULL, sizeof(ot_net) ) ) | 489 | if (map && set_value_for_net(net, &map->networks, NULL, sizeof(ot_net))) |
279 | result = 1; | 490 | result = 1; |
280 | 491 | ||
281 | pthread_mutex_unlock(&g_proxies_list_mutex); | 492 | pthread_mutex_unlock(&g_proxies_list_mutex); |
282 | return result; | 493 | return result; |
283 | } | 494 | } |
284 | 495 | ||
285 | int proxylist_check_proxy( const ot_ip6 proxy, const ot_ip6 address ) { | 496 | int proxylist_check_proxy(const ot_ip6 proxy, const ot_ip6 address) { |
286 | int result = 0; | 497 | int result = 0; |
287 | ot_proxymap *map; | 498 | ot_proxymap *map; |
288 | 499 | ||
289 | pthread_mutex_lock(&g_proxies_list_mutex); | 500 | pthread_mutex_lock(&g_proxies_list_mutex); |
290 | 501 | ||
291 | if( ( map = get_value_for_net( proxy, &g_proxies_list, sizeof(ot_proxymap) ) ) ) | 502 | if ((map = get_value_for_net(proxy, &g_proxies_list, sizeof(ot_proxymap)))) |
292 | if( !address || get_value_for_net( address, &map->networks, sizeof(ot_net) ) ) | 503 | if (!address || get_value_for_net(address, &map->networks, sizeof(ot_net))) |
293 | result = 1; | 504 | result = 1; |
294 | 505 | ||
295 | pthread_mutex_unlock(&g_proxies_list_mutex); | 506 | pthread_mutex_unlock(&g_proxies_list_mutex); |
@@ -298,42 +509,53 @@ int proxylist_check_proxy( const ot_ip6 proxy, const ot_ip6 address ) { | |||
298 | 509 | ||
299 | #endif | 510 | #endif |
300 | 511 | ||
301 | static ot_ip6 g_adminip_addresses[OT_ADMINIP_MAX]; | 512 | static ot_net g_admin_nets[OT_ADMINIP_MAX]; |
302 | static ot_permissions g_adminip_permissions[OT_ADMINIP_MAX]; | 513 | static ot_permissions g_admin_nets_permissions[OT_ADMINIP_MAX]; |
303 | static unsigned int g_adminip_count = 0; | 514 | static unsigned int g_admin_nets_count = 0; |
304 | 515 | ||
305 | int accesslist_blessip( ot_ip6 ip, ot_permissions permissions ) { | 516 | int accesslist_bless_net(ot_net *net, ot_permissions permissions) { |
306 | if( g_adminip_count >= OT_ADMINIP_MAX ) | 517 | if (g_admin_nets_count >= OT_ADMINIP_MAX) |
307 | return -1; | 518 | return -1; |
308 | 519 | ||
309 | memcpy(g_adminip_addresses + g_adminip_count,ip,sizeof(ot_ip6)); | 520 | memcpy(g_admin_nets + g_admin_nets_count, net, sizeof(ot_net)); |
310 | g_adminip_permissions[ g_adminip_count++ ] = permissions; | 521 | g_admin_nets_permissions[g_admin_nets_count++] = permissions; |
311 | 522 | ||
312 | #ifdef _DEBUG | 523 | #ifdef _DEBUG |
313 | { | 524 | { |
314 | char _debug[512]; | 525 | char _debug[512]; |
315 | int off = snprintf( _debug, sizeof(_debug), "Blessing ip address " ); | 526 | int off = snprintf(_debug, sizeof(_debug), "Blessing ip net "); |
316 | off += fmt_ip6c(_debug+off, ip ); | 527 | off += fmt_ip6c(_debug + off, net->address); |
317 | 528 | if (net->bits < 128) { | |
318 | if( permissions & OT_PERMISSION_MAY_STAT ) off += snprintf( _debug+off, 512-off, " may_fetch_stats" ); | 529 | _debug[off++] = '/'; |
319 | if( permissions & OT_PERMISSION_MAY_LIVESYNC ) off += snprintf( _debug+off, 512-off, " may_sync_live" ); | 530 | if (ip6_isv4mapped(net->address)) |
320 | if( permissions & OT_PERMISSION_MAY_FULLSCRAPE ) off += snprintf( _debug+off, 512-off, " may_fetch_fullscrapes" ); | 531 | off += fmt_long(_debug + off, net->bits - 96); |
321 | if( permissions & OT_PERMISSION_MAY_PROXY ) off += snprintf( _debug+off, 512-off, " may_proxy" ); | 532 | else |
322 | if( !permissions ) off += snprintf( _debug+off, sizeof(_debug)-off, " nothing\n" ); | 533 | off += fmt_long(_debug + off, net->bits); |
534 | } | ||
535 | |||
536 | if (permissions & OT_PERMISSION_MAY_STAT) | ||
537 | off += snprintf(_debug + off, 512 - off, " may_fetch_stats"); | ||
538 | if (permissions & OT_PERMISSION_MAY_LIVESYNC) | ||
539 | off += snprintf(_debug + off, 512 - off, " may_sync_live"); | ||
540 | if (permissions & OT_PERMISSION_MAY_FULLSCRAPE) | ||
541 | off += snprintf(_debug + off, 512 - off, " may_fetch_fullscrapes"); | ||
542 | if (permissions & OT_PERMISSION_MAY_PROXY) | ||
543 | off += snprintf(_debug + off, 512 - off, " may_proxy"); | ||
544 | if (!permissions) | ||
545 | off += snprintf(_debug + off, sizeof(_debug) - off, " nothing"); | ||
323 | _debug[off++] = '.'; | 546 | _debug[off++] = '.'; |
324 | (void)write( 2, _debug, off ); | 547 | _debug[off++] = '\n'; |
548 | (void)write(2, _debug, off); | ||
325 | } | 549 | } |
326 | #endif | 550 | #endif |
327 | 551 | ||
328 | return 0; | 552 | return 0; |
329 | } | 553 | } |
330 | 554 | ||
331 | int accesslist_isblessed( ot_ip6 ip, ot_permissions permissions ) { | 555 | int accesslist_is_blessed(ot_ip6 ip, ot_permissions permissions) { |
332 | unsigned int i; | 556 | unsigned int i; |
333 | for( i=0; i<g_adminip_count; ++i ) | 557 | for (i = 0; i < g_admin_nets_count; ++i) |
334 | if( !memcmp( g_adminip_addresses + i, ip, sizeof(ot_ip6)) && ( g_adminip_permissions[ i ] & permissions ) ) | 558 | if (address_in_net(ip, g_admin_nets + i) && (g_admin_nets_permissions[i] & permissions)) |
335 | return 1; | 559 | return 1; |
336 | return 0; | 560 | return 0; |
337 | } | 561 | } |
338 | |||
339 | const char *g_version_accesslist_c = "$Source$: $Revision$\n"; | ||
diff --git a/ot_accesslist.h b/ot_accesslist.h index b38b91a..0a7488e 100644 --- a/ot_accesslist.h +++ b/ot_accesslist.h | |||
@@ -6,26 +6,37 @@ | |||
6 | #ifndef OT_ACCESSLIST_H__ | 6 | #ifndef OT_ACCESSLIST_H__ |
7 | #define OT_ACCESSLIST_H__ | 7 | #define OT_ACCESSLIST_H__ |
8 | 8 | ||
9 | #if defined ( WANT_ACCESSLIST_BLACK ) && defined (WANT_ACCESSLIST_WHITE ) | 9 | #include "trackerlogic.h" |
10 | # error WANT_ACCESSLIST_BLACK and WANT_ACCESSLIST_WHITE are exclusive. | 10 | |
11 | #if defined(WANT_ACCESSLIST_BLACK) && defined(WANT_ACCESSLIST_WHITE) | ||
12 | #error WANT_ACCESSLIST_BLACK and WANT_ACCESSLIST_WHITE are exclusive. | ||
11 | #endif | 13 | #endif |
12 | 14 | ||
13 | #if defined ( WANT_ACCESSLIST_BLACK ) || defined (WANT_ACCESSLIST_WHITE ) | 15 | #if defined(WANT_ACCESSLIST_BLACK) || defined(WANT_ACCESSLIST_WHITE) |
14 | #define WANT_ACCESSLIST | 16 | #define WANT_ACCESSLIST |
15 | void accesslist_init( ); | 17 | void accesslist_init(void); |
16 | void accesslist_deinit( ); | 18 | void accesslist_deinit(void); |
17 | int accesslist_hashisvalid( ot_hash hash ); | 19 | int accesslist_hashisvalid(ot_hash hash); |
20 | void accesslist_cleanup(void); | ||
18 | 21 | ||
19 | extern char *g_accesslist_filename; | 22 | extern char *g_accesslist_filename; |
23 | #ifdef WANT_DYNAMIC_ACCESSLIST | ||
24 | extern char *g_accesslist_pipe_add; | ||
25 | extern char *g_accesslist_pipe_delete; | ||
26 | #endif | ||
20 | 27 | ||
21 | #else | 28 | #else |
22 | #define accesslist_init( accesslist_filename ) | 29 | #ifdef WANT_DYNAMIC_ACCESSLIST |
23 | #define accesslist_deinit( ) | 30 | #error WANT_DYNAMIC_ACCESSLIST needs either WANT_ACCESSLIST_BLACK or WANT_ACCESSLIST_WHITE |
24 | #define accesslist_hashisvalid( hash ) 1 | 31 | #endif |
32 | |||
33 | #define accesslist_init(accesslist_filename) | ||
34 | #define accesslist_deinit() | ||
35 | #define accesslist_hashisvalid(hash) 1 | ||
25 | #endif | 36 | #endif |
26 | 37 | ||
27 | /* Test if an address is subset of an ot_net, return value is considered a bool */ | 38 | /* Test if an address is subset of an ot_net, return value is considered a bool */ |
28 | int address_in_net( const ot_ip6 address, const ot_net *net ); | 39 | int address_in_net(const ot_ip6 address, const ot_net *net); |
29 | 40 | ||
30 | /* Store a value into a vector of struct { ot_net net, uint8_t[x] value } member; | 41 | /* Store a value into a vector of struct { ot_net net, uint8_t[x] value } member; |
31 | returns NULL | 42 | returns NULL |
@@ -36,18 +47,17 @@ int address_in_net( const ot_ip6 address, const ot_net *net ); | |||
36 | returns pointer to new member in vector for success | 47 | returns pointer to new member in vector for success |
37 | member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping | 48 | member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping |
38 | */ | 49 | */ |
39 | void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value, const size_t member_size ); | 50 | void *set_value_for_net(const ot_net *net, ot_vector *vector, const void *value, const size_t member_size); |
40 | 51 | ||
41 | /* Takes a vector filled with struct { ot_net net, uint8_t[x] value } member; | 52 | /* Takes a vector filled with struct { ot_net net, uint8_t[x] value } member; |
42 | Returns pointer to _member_ associated with the net, or NULL if not found | 53 | Returns pointer to _member_ associated with the net, or NULL if not found |
43 | member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping | 54 | member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping |
44 | */ | 55 | */ |
45 | void *get_value_for_net( const ot_ip6 address, const ot_vector *vector, const size_t member_size ); | 56 | void *get_value_for_net(const ot_ip6 address, const ot_vector *vector, const size_t member_size); |
46 | |||
47 | 57 | ||
48 | #ifdef WANT_IP_FROM_PROXY | 58 | #ifdef WANT_IP_FROM_PROXY |
49 | int proxylist_add_network( const ot_net *proxy, const ot_net *net ); | 59 | int proxylist_add_network(const ot_net *proxy, const ot_net *net); |
50 | int proxylist_check_network( const ot_ip6 *proxy, const ot_ip6 address /* can be NULL to only check proxy */ ); | 60 | int proxylist_check_network(const ot_ip6 *proxy, const ot_ip6 address /* can be NULL to only check proxy */); |
51 | #endif | 61 | #endif |
52 | 62 | ||
53 | #ifdef WANT_FULLLOG_NETWORKS | 63 | #ifdef WANT_FULLLOG_NETWORKS |
@@ -61,10 +71,10 @@ struct ot_log { | |||
61 | }; | 71 | }; |
62 | extern ot_log *g_logchain_first, *g_logchain_last; | 72 | extern ot_log *g_logchain_first, *g_logchain_last; |
63 | 73 | ||
64 | void loglist_add_network( const ot_net *net ); | 74 | void loglist_add_network(const ot_net *net); |
65 | void loglist_reset( ); | 75 | void loglist_reset(); |
66 | int loglist_check_address( const ot_ip6 address ); | 76 | int loglist_check_address(const ot_ip6 address); |
67 | #endif | 77 | #endif |
68 | 78 | ||
69 | typedef enum { | 79 | typedef enum { |
70 | OT_PERMISSION_MAY_FULLSCRAPE = 0x1, | 80 | OT_PERMISSION_MAY_FULLSCRAPE = 0x1, |
@@ -73,7 +83,7 @@ typedef enum { | |||
73 | OT_PERMISSION_MAY_PROXY = 0x8 | 83 | OT_PERMISSION_MAY_PROXY = 0x8 |
74 | } ot_permissions; | 84 | } ot_permissions; |
75 | 85 | ||
76 | int accesslist_blessip( ot_ip6 ip, ot_permissions permissions ); | 86 | int accesslist_bless_net(ot_net *net, ot_permissions permissions); |
77 | int accesslist_isblessed( ot_ip6 ip, ot_permissions permissions ); | 87 | int accesslist_is_blessed(ot_ip6 ip, ot_permissions permissions); |
78 | 88 | ||
79 | #endif | 89 | #endif |
@@ -5,89 +5,91 @@ | |||
5 | 5 | ||
6 | /* System */ | 6 | /* System */ |
7 | #include <pthread.h> | 7 | #include <pthread.h> |
8 | #include <unistd.h> | ||
9 | #include <string.h> | 8 | #include <string.h> |
9 | #include <unistd.h> | ||
10 | 10 | ||
11 | /* Libowfat */ | 11 | /* Libowfat */ |
12 | #include "io.h" | 12 | #include "io.h" |
13 | 13 | ||
14 | /* Opentracker */ | 14 | /* Opentracker */ |
15 | #include "trackerlogic.h" | 15 | #include "ot_accesslist.h" |
16 | #include "ot_mutex.h" | ||
17 | #include "ot_vector.h" | ||
18 | #include "ot_clean.h" | 16 | #include "ot_clean.h" |
17 | #include "ot_mutex.h" | ||
19 | #include "ot_stats.h" | 18 | #include "ot_stats.h" |
19 | #include "ot_vector.h" | ||
20 | #include "trackerlogic.h" | ||
20 | 21 | ||
21 | /* Returns amount of removed peers */ | 22 | /* Returns amount of removed peers */ |
22 | static ssize_t clean_single_bucket( ot_peer *peers, size_t peer_count, time_t timedout, int *removed_seeders ) { | 23 | static ssize_t clean_single_bucket(ot_peer *peers, size_t peer_count, size_t peer_size, time_t timedout, int *removed_seeders) { |
23 | ot_peer *last_peer = peers + peer_count, *insert_point; | 24 | ot_peer *last_peer = peers + peer_count * peer_size, *insert_point; |
24 | time_t timediff; | ||
25 | 25 | ||
26 | /* Two scan modes: unless there is one peer removed, just increase ot_peertime */ | 26 | /* Two scan modes: unless there is one peer removed, just increase ot_peertime */ |
27 | while( peers < last_peer ) { | 27 | while (peers < last_peer) { |
28 | if( ( timediff = timedout + OT_PEERTIME( peers ) ) >= OT_PEER_TIMEOUT ) | 28 | time_t timediff = timedout + OT_PEERTIME(peers, peer_size); |
29 | if (timediff >= OT_PEER_TIMEOUT) | ||
29 | break; | 30 | break; |
30 | OT_PEERTIME( peers++ ) = timediff; | 31 | OT_PEERTIME(peers, peer_size) = timediff; |
32 | peers += peer_size; | ||
33 | } | ||
34 | |||
35 | /* If we at least remove one peer, we have to copy */ | ||
36 | for (insert_point = peers; peers < last_peer; peers += peer_size) { | ||
37 | time_t timediff = timedout + OT_PEERTIME(peers, peer_size); | ||
38 | |||
39 | if (timediff < OT_PEER_TIMEOUT) { | ||
40 | OT_PEERTIME(peers, peer_size) = timediff; | ||
41 | memcpy(insert_point, peers, peer_size); | ||
42 | insert_point += peer_size; | ||
43 | } else if (OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING) | ||
44 | (*removed_seeders)++; | ||
31 | } | 45 | } |
32 | 46 | ||
33 | /* If we at least remove one peer, we have to copy */ | 47 | return (peers - insert_point) / peer_size; |
34 | insert_point = peers; | ||
35 | while( peers < last_peer ) | ||
36 | if( ( timediff = timedout + OT_PEERTIME( peers ) ) < OT_PEER_TIMEOUT ) { | ||
37 | OT_PEERTIME( peers ) = timediff; | ||
38 | memcpy( insert_point++, peers++, sizeof(ot_peer)); | ||
39 | } else | ||
40 | if( OT_PEERFLAG( peers++ ) & PEER_FLAG_SEEDING ) | ||
41 | (*removed_seeders)++; | ||
42 | |||
43 | return peers - insert_point; | ||
44 | } | 48 | } |
45 | 49 | ||
46 | /* Clean a single torrent | 50 | int clean_single_peer_list(ot_peerlist *peer_list, size_t peer_size) { |
47 | return 1 if torrent timed out | 51 | ot_vector *peer_vector = &peer_list->peers; |
48 | */ | 52 | time_t timedout = (time_t)(g_now_minutes - peer_list->base); |
49 | int clean_single_torrent( ot_torrent *torrent ) { | 53 | int num_buckets = 1, removed_seeders = 0; |
50 | ot_peerlist *peer_list = torrent->peer_list; | ||
51 | ot_vector *bucket_list = &peer_list->peers; | ||
52 | time_t timedout = (time_t)( g_now_minutes - peer_list->base ); | ||
53 | int num_buckets = 1, removed_seeders = 0; | ||
54 | 54 | ||
55 | /* No need to clean empty torrent */ | 55 | /* No need to clean empty torrent */ |
56 | if( !timedout ) | 56 | if (!timedout) |
57 | return 0; | 57 | return 0; |
58 | 58 | ||
59 | /* Torrent has idled out */ | 59 | /* Torrent has idled out */ |
60 | if( timedout > OT_TORRENT_TIMEOUT ) | 60 | if (timedout > OT_TORRENT_TIMEOUT) |
61 | return 1; | 61 | return 1; |
62 | 62 | ||
63 | /* Nothing to be cleaned here? Test if torrent is worth keeping */ | 63 | /* Nothing to be cleaned here? Test if torrent is worth keeping */ |
64 | if( timedout > OT_PEER_TIMEOUT ) { | 64 | if (timedout > OT_PEER_TIMEOUT) { |
65 | if( !peer_list->peer_count ) | 65 | if (!peer_list->peer_count) |
66 | return peer_list->down_count ? 0 : 1; | 66 | return peer_list->down_count ? 0 : 1; |
67 | timedout = OT_PEER_TIMEOUT; | 67 | timedout = OT_PEER_TIMEOUT; |
68 | } | 68 | } |
69 | 69 | ||
70 | if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { | 70 | if (OT_PEERLIST_HASBUCKETS(peer_list)) { |
71 | num_buckets = bucket_list->size; | 71 | num_buckets = peer_vector->size; |
72 | bucket_list = (ot_vector *)bucket_list->data; | 72 | peer_vector = (ot_vector *)peer_vector->data; |
73 | } | 73 | } |
74 | 74 | ||
75 | while( num_buckets-- ) { | 75 | while (num_buckets--) { |
76 | size_t removed_peers = clean_single_bucket( bucket_list->data, bucket_list->size, timedout, &removed_seeders ); | 76 | size_t removed_peers = clean_single_bucket(peer_vector->data, peer_vector->size, peer_size, timedout, &removed_seeders); |
77 | peer_list->peer_count -= removed_peers; | 77 | peer_list->peer_count -= removed_peers; |
78 | bucket_list->size -= removed_peers; | 78 | peer_vector->size -= removed_peers; |
79 | if( bucket_list->size < removed_peers ) | 79 | if (removed_peers) |
80 | vector_fixup_peers( bucket_list ); | 80 | vector_fixup_peers(peer_vector, peer_size); |
81 | ++bucket_list; | 81 | |
82 | /* Skip to next bucket, a vector containing peers */ | ||
83 | ++peer_vector; | ||
82 | } | 84 | } |
83 | 85 | ||
84 | peer_list->seed_count -= removed_seeders; | 86 | peer_list->seed_count -= removed_seeders; |
85 | 87 | ||
86 | /* See, if we need to convert a torrent from simple vector to bucket list */ | 88 | /* See if we need to convert a torrent from simple vector to bucket list */ |
87 | if( ( peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT ) || OT_PEERLIST_HASBUCKETS(peer_list) ) | 89 | if ((peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT) || OT_PEERLIST_HASBUCKETS(peer_list)) |
88 | vector_redistribute_buckets( peer_list ); | 90 | vector_redistribute_buckets(peer_list, peer_size); |
89 | 91 | ||
90 | if( peer_list->peer_count ) | 92 | if (peer_list->peer_count) |
91 | peer_list->base = g_now_minutes; | 93 | peer_list->base = g_now_minutes; |
92 | else { | 94 | else { |
93 | /* When we got here, the last time that torrent | 95 | /* When we got here, the last time that torrent |
@@ -95,45 +97,48 @@ int clean_single_torrent( ot_torrent *torrent ) { | |||
95 | peer_list->base = g_now_minutes - OT_PEER_TIMEOUT; | 97 | peer_list->base = g_now_minutes - OT_PEER_TIMEOUT; |
96 | } | 98 | } |
97 | return 0; | 99 | return 0; |
100 | } | ||
98 | 101 | ||
102 | /* Clean a single torrent | ||
103 | return 1 if torrent timed out | ||
104 | */ | ||
105 | int clean_single_torrent(ot_torrent *torrent) { | ||
106 | return clean_single_peer_list(torrent->peer_list6, OT_PEER_SIZE6) * clean_single_peer_list(torrent->peer_list4, OT_PEER_SIZE4); | ||
99 | } | 107 | } |
100 | 108 | ||
101 | /* Clean up all peers in current bucket, remove timedout pools and | 109 | /* Clean up all peers in current bucket, remove timedout pools and |
102 | torrents */ | 110 | torrents */ |
103 | static void * clean_worker( void * args ) { | 111 | static void *clean_worker(void *args) { |
104 | (void) args; | 112 | (void)args; |
105 | while( 1 ) { | 113 | while (1) { |
106 | int bucket = OT_BUCKET_COUNT; | 114 | int bucket = OT_BUCKET_COUNT; |
107 | while( bucket-- ) { | 115 | while (bucket--) { |
108 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 116 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
109 | size_t toffs; | 117 | size_t toffs; |
110 | int delta_torrentcount = 0; | 118 | int delta_torrentcount = 0; |
111 | 119 | ||
112 | for( toffs=0; toffs<torrents_list->size; ++toffs ) { | 120 | for (toffs = 0; toffs < torrents_list->size; ++toffs) { |
113 | ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + toffs; | 121 | ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + toffs; |
114 | if( clean_single_torrent( torrent ) ) { | 122 | if (clean_single_torrent(torrent)) { |
115 | vector_remove_torrent( torrents_list, torrent ); | 123 | vector_remove_torrent(torrents_list, torrent); |
116 | --delta_torrentcount; | 124 | --delta_torrentcount; |
117 | --toffs; | 125 | --toffs; |
118 | } | 126 | } |
119 | } | 127 | } |
120 | mutex_bucket_unlock( bucket, delta_torrentcount ); | 128 | mutex_bucket_unlock(bucket, delta_torrentcount); |
121 | if( !g_opentracker_running ) | 129 | if (!g_opentracker_running) |
122 | return NULL; | 130 | return NULL; |
123 | usleep( OT_CLEAN_SLEEP ); | 131 | usleep(OT_CLEAN_SLEEP); |
124 | } | 132 | } |
125 | stats_cleanup(); | 133 | stats_cleanup(); |
134 | #ifdef WANT_ACCESSLIST | ||
135 | accesslist_cleanup(); | ||
136 | #endif | ||
126 | } | 137 | } |
127 | return NULL; | 138 | return NULL; |
128 | } | 139 | } |
129 | 140 | ||
130 | static pthread_t thread_id; | 141 | static pthread_t thread_id; |
131 | void clean_init( void ) { | 142 | void clean_init(void) { pthread_create(&thread_id, NULL, clean_worker, NULL); } |
132 | pthread_create( &thread_id, NULL, clean_worker, NULL ); | ||
133 | } | ||
134 | |||
135 | void clean_deinit( void ) { | ||
136 | pthread_cancel( thread_id ); | ||
137 | } | ||
138 | 143 | ||
139 | const char *g_version_clean_c = "$Source$: $Revision$\n"; | 144 | void clean_deinit(void) { pthread_cancel(thread_id); } |
@@ -7,13 +7,13 @@ | |||
7 | #define OT_CLEAN_H__ | 7 | #define OT_CLEAN_H__ |
8 | 8 | ||
9 | /* The amount of time a clean cycle should take */ | 9 | /* The amount of time a clean cycle should take */ |
10 | #define OT_CLEAN_INTERVAL_MINUTES 2 | 10 | #define OT_CLEAN_INTERVAL_MINUTES 2 |
11 | 11 | ||
12 | /* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */ | 12 | /* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */ |
13 | #define OT_CLEAN_SLEEP ( ( ( OT_CLEAN_INTERVAL_MINUTES ) * 60 * 1000000 ) / ( OT_BUCKET_COUNT ) ) | 13 | #define OT_CLEAN_SLEEP (((OT_CLEAN_INTERVAL_MINUTES) * 60 * 1000000) / (OT_BUCKET_COUNT)) |
14 | 14 | ||
15 | void clean_init( void ); | 15 | void clean_init(void); |
16 | void clean_deinit( void ); | 16 | void clean_deinit(void); |
17 | int clean_single_torrent( ot_torrent *torrent ); | 17 | int clean_single_torrent(ot_torrent *torrent); |
18 | 18 | ||
19 | #endif | 19 | #endif |
diff --git a/ot_fullscrape.c b/ot_fullscrape.c index 5d115dc..6fd6d1c 100644 --- a/ot_fullscrape.c +++ b/ot_fullscrape.c | |||
@@ -6,14 +6,18 @@ | |||
6 | #ifdef WANT_FULLSCRAPE | 6 | #ifdef WANT_FULLSCRAPE |
7 | 7 | ||
8 | /* System */ | 8 | /* System */ |
9 | #include <sys/param.h> | 9 | #include <arpa/inet.h> |
10 | #include <pthread.h> | ||
10 | #include <stdio.h> | 11 | #include <stdio.h> |
11 | #include <string.h> | 12 | #include <string.h> |
12 | #include <pthread.h> | 13 | #include <sys/param.h> |
13 | #include <arpa/inet.h> | ||
14 | #ifdef WANT_COMPRESSION_GZIP | 14 | #ifdef WANT_COMPRESSION_GZIP |
15 | #include <zlib.h> | 15 | #include <zlib.h> |
16 | #endif | 16 | #endif |
17 | #ifdef WANT_COMPRESSION_ZSTD | ||
18 | #include <zstd.h> | ||
19 | #endif | ||
20 | |||
17 | 21 | ||
18 | /* Libowfat */ | 22 | /* Libowfat */ |
19 | #include "byte.h" | 23 | #include "byte.h" |
@@ -21,50 +25,64 @@ | |||
21 | #include "textcode.h" | 25 | #include "textcode.h" |
22 | 26 | ||
23 | /* Opentracker */ | 27 | /* Opentracker */ |
24 | #include "trackerlogic.h" | ||
25 | #include "ot_mutex.h" | ||
26 | #include "ot_iovec.h" | ||
27 | #include "ot_fullscrape.h" | 28 | #include "ot_fullscrape.h" |
29 | #include "ot_iovec.h" | ||
30 | #include "ot_mutex.h" | ||
31 | #include "trackerlogic.h" | ||
28 | 32 | ||
29 | /* Fetch full scrape info for all torrents | 33 | /* Fetch full scrape info for all torrents |
30 | Full scrapes usually are huge and one does not want to | 34 | Full scrapes usually are huge and one does not want to |
31 | allocate more memory. So lets get them in 512k units | 35 | allocate more memory. So lets get them in 512k units |
32 | */ | 36 | */ |
33 | #define OT_SCRAPE_CHUNK_SIZE (1024*1024) | 37 | #define OT_SCRAPE_CHUNK_SIZE (1024 * 1024) |
34 | 38 | ||
35 | /* "d8:completei%zde10:downloadedi%zde10:incompletei%zdee" */ | 39 | /* "d8:completei%zde10:downloadedi%zde10:incompletei%zdee" */ |
36 | #define OT_SCRAPE_MAXENTRYLEN 256 | 40 | #define OT_SCRAPE_MAXENTRYLEN 256 |
37 | 41 | ||
38 | /* Forward declaration */ | 42 | /* Forward declaration */ |
39 | static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ); | 43 | static void fullscrape_make(int taskid, ot_tasktype mode); |
40 | #ifdef WANT_COMPRESSION_GZIP | 44 | #ifdef WANT_COMPRESSION_GZIP |
41 | static void fullscrape_make_gzip( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ); | 45 | static void fullscrape_make_gzip(int taskid, ot_tasktype mode); |
46 | #endif | ||
47 | #ifdef WANT_COMPRESSION_ZSTD | ||
48 | static void fullscrape_make_zstd(int taskid, ot_tasktype mode); | ||
42 | #endif | 49 | #endif |
43 | 50 | ||
44 | /* Converter function from memory to human readable hex strings | 51 | /* Converter function from memory to human readable hex strings |
45 | XXX - Duplicated from ot_stats. Needs fix. */ | 52 | XXX - Duplicated from ot_stats. Needs fix. */ |
46 | static char*to_hex(char*d,uint8_t*s){char*m="0123456789ABCDEF";char *t=d;char*e=d+40;while(d<e){*d++=m[*s>>4];*d++=m[*s++&15];}*d=0;return t;} | 53 | static char *to_hex(char *d, uint8_t *s) { |
54 | char *m = "0123456789ABCDEF"; | ||
55 | char *t = d; | ||
56 | char *e = d + 40; | ||
57 | while (d < e) { | ||
58 | *d++ = m[*s >> 4]; | ||
59 | *d++ = m[*s++ & 15]; | ||
60 | } | ||
61 | *d = 0; | ||
62 | return t; | ||
63 | } | ||
47 | 64 | ||
48 | /* This is the entry point into this worker thread | 65 | /* This is the entry point into this worker thread |
49 | It grabs tasks from mutex_tasklist and delivers results back | 66 | It grabs tasks from mutex_tasklist and delivers results back |
50 | */ | 67 | */ |
51 | static void * fullscrape_worker( void * args ) { | 68 | static void *fullscrape_worker(void *args) { |
52 | int iovec_entries; | 69 | (void)args; |
53 | struct iovec *iovector; | ||
54 | 70 | ||
55 | (void) args; | 71 | while (g_opentracker_running) { |
56 | |||
57 | while( g_opentracker_running ) { | ||
58 | ot_tasktype tasktype = TASK_FULLSCRAPE; | 72 | ot_tasktype tasktype = TASK_FULLSCRAPE; |
59 | ot_taskid taskid = mutex_workqueue_poptask( &tasktype ); | 73 | ot_taskid taskid = mutex_workqueue_poptask(&tasktype); |
74 | #ifdef WANT_COMPRESSION_ZSTD | ||
75 | if (tasktype & TASK_FLAG_ZSTD) | ||
76 | fullscrape_make_zstd(taskid, tasktype); | ||
77 | else | ||
78 | #endif | ||
60 | #ifdef WANT_COMPRESSION_GZIP | 79 | #ifdef WANT_COMPRESSION_GZIP |
61 | if (tasktype & TASK_FLAG_GZIP) | 80 | if (tasktype & TASK_FLAG_GZIP) |
62 | fullscrape_make_gzip( &iovec_entries, &iovector, tasktype ); | 81 | fullscrape_make_gzip(taskid, tasktype); |
63 | else | 82 | else |
64 | #endif | 83 | #endif |
65 | fullscrape_make( &iovec_entries, &iovector, tasktype ); | 84 | fullscrape_make(taskid, tasktype); |
66 | if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) ) | 85 | mutex_workqueue_pushchunked(taskid, NULL); |
67 | iovec_free( &iovec_entries, &iovector ); | ||
68 | } | 86 | } |
69 | return NULL; | 87 | return NULL; |
70 | } | 88 | } |
@@ -82,76 +100,92 @@ void fullscrape_deliver( int64 sock, ot_tasktype tasktype ) { | |||
82 | mutex_workqueue_pushtask( sock, tasktype ); | 100 | mutex_workqueue_pushtask( sock, tasktype ); |
83 | } | 101 | } |
84 | 102 | ||
85 | static char * fullscrape_write_one( ot_tasktype mode, char *r, ot_peerlist *peer_list, ot_hash *hash ) { | 103 | static char * fullscrape_write_one( ot_tasktype mode, char *r, ot_torrent *torrent, ot_hash *hash ) { |
86 | switch( mode & TASK_TASK_MASK ) { | 104 | size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; |
87 | case TASK_FULLSCRAPE: | 105 | size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; |
88 | default: | 106 | size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count; |
89 | /* push hash as bencoded string */ | 107 | |
90 | *r++='2'; *r++='0'; *r++=':'; | 108 | switch (mode & TASK_TASK_MASK) { |
91 | memcpy( r, hash, sizeof(ot_hash) ); r += sizeof(ot_hash); | 109 | case TASK_FULLSCRAPE: |
92 | /* push rest of the scrape string */ | 110 | default: |
93 | r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", peer_list->seed_count, peer_list->down_count, peer_list->peer_count-peer_list->seed_count ); | 111 | /* push hash as bencoded string */ |
94 | 112 | *r++ = '2'; | |
95 | break; | 113 | *r++ = '0'; |
96 | case TASK_FULLSCRAPE_TPB_ASCII: | 114 | *r++ = ':'; |
97 | to_hex( r, *hash ); r+= 2 * sizeof(ot_hash); | 115 | memcpy(r, hash, sizeof(ot_hash)); |
98 | r += sprintf( r, ":%zd:%zd\n", peer_list->seed_count, peer_list->peer_count-peer_list->seed_count ); | 116 | r += sizeof(ot_hash); |
99 | break; | 117 | /* push rest of the scrape string */ |
100 | case TASK_FULLSCRAPE_TPB_ASCII_PLUS: | 118 | r += sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", seed_count, down_count, peer_count - seed_count); |
101 | to_hex( r, *hash ); r+= 2 * sizeof(ot_hash); | 119 | |
102 | r += sprintf( r, ":%zd:%zd:%zd\n", peer_list->seed_count, peer_list->peer_count-peer_list->seed_count, peer_list->down_count ); | 120 | break; |
103 | break; | 121 | case TASK_FULLSCRAPE_TPB_ASCII: |
104 | case TASK_FULLSCRAPE_TPB_BINARY: | 122 | to_hex(r, *hash); |
105 | memcpy( r, *hash, sizeof(ot_hash) ); r += sizeof(ot_hash); | 123 | r += 2 * sizeof(ot_hash); |
106 | *(uint32_t*)(r+0) = htonl( (uint32_t) peer_list->seed_count ); | 124 | r += sprintf(r, ":%zd:%zd\n", seed_count, peer_count - seed_count); |
107 | *(uint32_t*)(r+4) = htonl( (uint32_t)( peer_list->peer_count-peer_list->seed_count) ); | 125 | break; |
108 | r+=8; | 126 | case TASK_FULLSCRAPE_TPB_ASCII_PLUS: |
109 | break; | 127 | to_hex(r, *hash); |
110 | case TASK_FULLSCRAPE_TPB_URLENCODED: | 128 | r += 2 * sizeof(ot_hash); |
111 | r += fmt_urlencoded( r, (char *)*hash, 20 ); | 129 | r += sprintf(r, ":%zd:%zd:%zd\n", seed_count, peer_count - seed_count, down_count); |
112 | r += sprintf( r, ":%zd:%zd\n", peer_list->seed_count, peer_list->peer_count-peer_list->seed_count ); | 130 | break; |
113 | break; | 131 | case TASK_FULLSCRAPE_TPB_BINARY: |
114 | case TASK_FULLSCRAPE_TRACKERSTATE: | 132 | memcpy(r, *hash, sizeof(ot_hash)); |
115 | to_hex( r, *hash ); r+= 2 * sizeof(ot_hash); | 133 | r += sizeof(ot_hash); |
116 | r += sprintf( r, ":%zd:%zd\n", peer_list->base, peer_list->down_count ); | 134 | *(uint32_t *)(r + 0) = htonl((uint32_t)seed_count); |
117 | break; | 135 | *(uint32_t *)(r + 4) = htonl((uint32_t)(peer_count - seed_count)); |
118 | } | 136 | r += 8; |
119 | return r; | 137 | break; |
138 | case TASK_FULLSCRAPE_TPB_URLENCODED: | ||
139 | r += fmt_urlencoded(r, (char *)*hash, 20); | ||
140 | r += sprintf(r, ":%zd:%zd\n", seed_count, peer_count - seed_count); | ||
141 | break; | ||
142 | case TASK_FULLSCRAPE_TRACKERSTATE: | ||
143 | to_hex(r, *hash); | ||
144 | r += 2 * sizeof(ot_hash); | ||
145 | r += sprintf(r, ":%zd:%zd\n", torrent->peer_list6->base, down_count); | ||
146 | break; | ||
147 | } | ||
148 | return r; | ||
120 | } | 149 | } |
121 | 150 | ||
122 | static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ) { | 151 | static void fullscrape_make(int taskid, ot_tasktype mode) { |
123 | int bucket; | 152 | int bucket; |
124 | char *r, *re; | 153 | char *r, *re; |
154 | struct iovec iovector = {NULL, 0}; | ||
125 | 155 | ||
126 | /* Setup return vector... */ | 156 | /* Setup return vector... */ |
127 | *iovec_entries = 0; | 157 | r = iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); |
128 | *iovector = NULL; | 158 | if (!r) |
129 | r = iovec_increase( iovec_entries, iovector, OT_SCRAPE_CHUNK_SIZE ); | ||
130 | if( !r ) | ||
131 | return; | 159 | return; |
132 | 160 | ||
133 | /* re points to low watermark */ | 161 | /* re points to low watermark */ |
134 | re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN; | 162 | re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN; |
135 | 163 | ||
136 | if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) | 164 | if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) |
137 | r += sprintf( r, "d5:filesd" ); | 165 | r += sprintf(r, "d5:filesd"); |
138 | 166 | ||
139 | /* For each bucket... */ | 167 | /* For each bucket... */ |
140 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 168 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
141 | /* Get exclusive access to that bucket */ | 169 | /* Get exclusive access to that bucket */ |
142 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 170 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
143 | ot_torrent *torrents = (ot_torrent*)(torrents_list->data); | 171 | ot_torrent *torrents = (ot_torrent *)(torrents_list->data); |
144 | size_t i; | 172 | size_t i; |
145 | 173 | ||
146 | /* For each torrent in this bucket.. */ | 174 | /* For each torrent in this bucket.. */ |
147 | for( i=0; i<torrents_list->size; ++i ) { | 175 | for (i = 0; i < torrents_list->size; ++i) { |
148 | r = fullscrape_write_one( mode, r, torrents[i].peer_list, &torrents[i].hash ); | 176 | r = fullscrape_write_one(mode, r, torrents + i, &torrents[i].hash); |
149 | 177 | ||
150 | if( r > re) { | 178 | if (r > re) { |
151 | /* Allocate a fresh output buffer at the end of our buffers list */ | 179 | iovector.iov_len = r - (char *)iovector.iov_base; |
152 | r = iovec_fix_increase_or_free( iovec_entries, iovector, r, OT_SCRAPE_CHUNK_SIZE ); | 180 | |
153 | if( !r ) | 181 | if (mutex_workqueue_pushchunked(taskid, &iovector)) { |
154 | return mutex_bucket_unlock( bucket, 0 ); | 182 | free(iovector.iov_base); |
183 | return mutex_bucket_unlock(bucket, 0); | ||
184 | } | ||
185 | /* Allocate a fresh output buffer */ | ||
186 | r = iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); | ||
187 | if (!r) | ||
188 | return mutex_bucket_unlock(bucket, 0); | ||
155 | 189 | ||
156 | /* re points to low watermark */ | 190 | /* re points to low watermark */ |
157 | re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN; | 191 | re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN; |
@@ -159,125 +193,265 @@ static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tas | |||
159 | } | 193 | } |
160 | 194 | ||
161 | /* All torrents done: release lock on current bucket */ | 195 | /* All torrents done: release lock on current bucket */ |
162 | mutex_bucket_unlock( bucket, 0 ); | 196 | mutex_bucket_unlock(bucket, 0); |
163 | 197 | ||
164 | /* Parent thread died? */ | 198 | /* Parent thread died? */ |
165 | if( !g_opentracker_running ) | 199 | if (!g_opentracker_running) |
166 | return; | 200 | return; |
167 | } | 201 | } |
168 | 202 | ||
169 | if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) | 203 | if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) |
170 | r += sprintf( r, "ee" ); | 204 | r += sprintf(r, "ee"); |
171 | 205 | ||
172 | /* Release unused memory in current output buffer */ | 206 | /* Send rest of data */ |
173 | iovec_fixlast( iovec_entries, iovector, r ); | 207 | iovector.iov_len = r - (char *)iovector.iov_base; |
208 | if (mutex_workqueue_pushchunked(taskid, &iovector)) | ||
209 | free(iovector.iov_base); | ||
174 | } | 210 | } |
175 | 211 | ||
176 | #ifdef WANT_COMPRESSION_GZIP | 212 | #ifdef WANT_COMPRESSION_GZIP |
177 | 213 | ||
178 | static void fullscrape_make_gzip( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ) { | 214 | static void fullscrape_make_gzip(int taskid, ot_tasktype mode) { |
179 | int bucket; | 215 | int bucket; |
180 | char *r; | 216 | char *r; |
181 | int zres; | 217 | struct iovec iovector = {NULL, 0}; |
182 | z_stream strm; | 218 | int zres; |
183 | 219 | z_stream strm; | |
184 | /* Setup return vector... */ | 220 | /* Setup return vector... */ |
185 | *iovec_entries = 0; | 221 | iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); |
186 | *iovector = NULL; | 222 | if (!iovector.iov_base) |
187 | r = iovec_increase( iovec_entries, iovector, OT_SCRAPE_CHUNK_SIZE ); | ||
188 | if( !r ) | ||
189 | return; | 223 | return; |
190 | 224 | ||
191 | byte_zero( &strm, sizeof(strm) ); | 225 | byte_zero(&strm, sizeof(strm)); |
192 | strm.next_out = (uint8_t*)r; | 226 | strm.next_out = (uint8_t *)iovector.iov_base; |
193 | strm.avail_out = OT_SCRAPE_CHUNK_SIZE; | 227 | strm.avail_out = OT_SCRAPE_CHUNK_SIZE; |
194 | if( deflateInit2(&strm,7,Z_DEFLATED,31,9,Z_DEFAULT_STRATEGY) != Z_OK ) | 228 | if (deflateInit2(&strm, 7, Z_DEFLATED, 31, 9, Z_DEFAULT_STRATEGY) != Z_OK) |
195 | fprintf( stderr, "not ok.\n" ); | 229 | fprintf(stderr, "not ok.\n"); |
196 | 230 | ||
197 | if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) { | 231 | if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) { |
198 | strm.next_in = (uint8_t*)"d5:filesd"; | 232 | strm.next_in = (uint8_t *)"d5:filesd"; |
199 | strm.avail_in = strlen("d5:filesd"); | 233 | strm.avail_in = strlen("d5:filesd"); |
200 | zres = deflate( &strm, Z_NO_FLUSH ); | 234 | zres = deflate(&strm, Z_NO_FLUSH); |
201 | } | 235 | } |
202 | 236 | ||
203 | /* For each bucket... */ | 237 | /* For each bucket... */ |
204 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 238 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
205 | /* Get exclusive access to that bucket */ | 239 | /* Get exclusive access to that bucket */ |
206 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 240 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
207 | ot_torrent *torrents = (ot_torrent*)(torrents_list->data); | 241 | ot_torrent *torrents = (ot_torrent *)(torrents_list->data); |
208 | size_t i; | 242 | size_t i; |
209 | 243 | ||
210 | /* For each torrent in this bucket.. */ | 244 | /* For each torrent in this bucket.. */ |
211 | for( i=0; i<torrents_list->size; ++i ) { | 245 | for (i = 0; i < torrents_list->size; ++i) { |
212 | char compress_buffer[OT_SCRAPE_MAXENTRYLEN]; | 246 | char compress_buffer[OT_SCRAPE_MAXENTRYLEN]; |
213 | r = fullscrape_write_one( mode, compress_buffer, torrents[i].peer_list, &torrents[i].hash ); | 247 | r = fullscrape_write_one(mode, compress_buffer, torrents + i, &torrents[i].hash); |
214 | strm.next_in = (uint8_t*)compress_buffer; | 248 | strm.next_in = (uint8_t *)compress_buffer; |
215 | strm.avail_in = r - compress_buffer; | 249 | strm.avail_in = r - compress_buffer; |
216 | zres = deflate( &strm, Z_NO_FLUSH ); | 250 | zres = deflate(&strm, Z_NO_FLUSH); |
217 | if( ( zres < Z_OK ) && ( zres != Z_BUF_ERROR ) ) | 251 | if ((zres < Z_OK) && (zres != Z_BUF_ERROR)) |
218 | fprintf( stderr, "deflate() failed while in fullscrape_make().\n" ); | 252 | fprintf(stderr, "deflate() failed while in fullscrape_make().\n"); |
219 | 253 | ||
220 | /* Check if there still is enough buffer left */ | 254 | /* Check if there still is enough buffer left */ |
221 | while( !strm.avail_out ) { | 255 | while (!strm.avail_out) { |
222 | /* Allocate a fresh output buffer at the end of our buffers list */ | 256 | iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base; |
223 | r = iovec_increase( iovec_entries, iovector, OT_SCRAPE_CHUNK_SIZE ); | 257 | |
224 | if( !r ) { | 258 | if (mutex_workqueue_pushchunked(taskid, &iovector)) { |
225 | fprintf( stderr, "Problem with iovec_fix_increase_or_free\n" ); | 259 | free(iovector.iov_base); |
226 | iovec_free( iovec_entries, iovector ); | 260 | return mutex_bucket_unlock(bucket, 0); |
261 | } | ||
262 | /* Allocate a fresh output buffer */ | ||
263 | iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); | ||
264 | if (!iovector.iov_base) { | ||
265 | fprintf(stderr, "Out of memory trying to claim ouput buffer\n"); | ||
227 | deflateEnd(&strm); | 266 | deflateEnd(&strm); |
228 | return mutex_bucket_unlock( bucket, 0 ); | 267 | return mutex_bucket_unlock(bucket, 0); |
229 | } | 268 | } |
230 | strm.next_out = (uint8_t*)r; | 269 | strm.next_out = (uint8_t *)iovector.iov_base; |
231 | strm.avail_out = OT_SCRAPE_CHUNK_SIZE; | 270 | strm.avail_out = OT_SCRAPE_CHUNK_SIZE; |
232 | zres = deflate( &strm, Z_NO_FLUSH ); | 271 | zres = deflate(&strm, Z_NO_FLUSH); |
233 | if( ( zres < Z_OK ) && ( zres != Z_BUF_ERROR ) ) | 272 | if ((zres < Z_OK) && (zres != Z_BUF_ERROR)) |
234 | fprintf( stderr, "deflate() failed while in fullscrape_make().\n" ); | 273 | fprintf(stderr, "deflate() failed while in fullscrape_make().\n"); |
235 | } | 274 | } |
236 | } | 275 | } |
237 | 276 | ||
238 | /* All torrents done: release lock on current bucket */ | 277 | /* All torrents done: release lock on current bucket */ |
239 | mutex_bucket_unlock( bucket, 0 ); | 278 | mutex_bucket_unlock(bucket, 0); |
240 | 279 | ||
241 | /* Parent thread died? */ | 280 | /* Parent thread died? */ |
242 | if( !g_opentracker_running ) | 281 | if (!g_opentracker_running) { |
282 | deflateEnd(&strm); | ||
243 | return; | 283 | return; |
284 | } | ||
244 | } | 285 | } |
245 | 286 | ||
246 | if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) { | 287 | if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) { |
247 | strm.next_in = (uint8_t*)"ee"; | 288 | strm.next_in = (uint8_t *)"ee"; |
248 | strm.avail_in = strlen("ee"); | 289 | strm.avail_in = strlen("ee"); |
249 | } | 290 | } |
250 | 291 | ||
251 | if( deflate( &strm, Z_FINISH ) < Z_OK ) | 292 | if (deflate(&strm, Z_FINISH) < Z_OK) |
252 | fprintf( stderr, "deflate() failed while in fullscrape_make()'s endgame.\n" ); | 293 | fprintf(stderr, "deflate() failed while in fullscrape_make()'s endgame.\n"); |
253 | |||
254 | if( !strm.avail_out ) { | ||
255 | unsigned int pending; | ||
256 | int bits; | ||
257 | deflatePending( &strm, &pending, &bits); | ||
258 | pending += ( bits ? 1 : 0 ); | ||
259 | 294 | ||
260 | /* Allocate a fresh output buffer at the end of our buffers list */ | 295 | iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base; |
261 | r = iovec_fix_increase_or_free( iovec_entries, iovector, strm.next_out, pending ); | 296 | if (mutex_workqueue_pushchunked(taskid, &iovector)) { |
262 | if( !r ) { | 297 | free(iovector.iov_base); |
263 | fprintf( stderr, "Problem with iovec_fix_increase_or_free\n" ); | 298 | deflateEnd(&strm); |
264 | deflateEnd(&strm); | 299 | return; |
265 | return mutex_bucket_unlock( bucket, 0 ); | ||
266 | } | ||
267 | strm.next_out = (uint8_t*)r; | ||
268 | strm.avail_out = pending; | ||
269 | if( deflate( &strm, Z_FINISH ) < Z_OK ) | ||
270 | fprintf( stderr, "deflate() failed while in fullscrape_make()'s endgame.\n" ); | ||
271 | } | 300 | } |
272 | 301 | ||
273 | /* Release unused memory in current output buffer */ | 302 | /* Check if there's a last batch of data in the zlib buffer */ |
274 | iovec_fixlast( iovec_entries, iovector, strm.next_out ); | 303 | if (!strm.avail_out) { |
304 | /* Allocate a fresh output buffer */ | ||
305 | iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); | ||
306 | |||
307 | if (!iovector.iov_base) { | ||
308 | fprintf(stderr, "Problem with iovec_fix_increase_or_free\n"); | ||
309 | deflateEnd(&strm); | ||
310 | return; | ||
311 | } | ||
312 | strm.next_out = iovector.iov_base; | ||
313 | strm.avail_out = OT_SCRAPE_CHUNK_SIZE; | ||
314 | if (deflate(&strm, Z_FINISH) < Z_OK) | ||
315 | fprintf(stderr, "deflate() failed while in fullscrape_make()'s endgame.\n"); | ||
316 | |||
317 | /* Only pass the new buffer if there actually was some data left in the buffer */ | ||
318 | iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base; | ||
319 | if (!iovector.iov_len || mutex_workqueue_pushchunked(taskid, &iovector)) | ||
320 | free(iovector.iov_base); | ||
321 | } | ||
275 | 322 | ||
276 | deflateEnd(&strm); | 323 | deflateEnd(&strm); |
277 | } | 324 | } |
278 | /* WANT_COMPRESSION_GZIP */ | 325 | /* WANT_COMPRESSION_GZIP */ |
279 | #endif | 326 | #endif |
280 | 327 | ||
328 | #ifdef WANT_COMPRESSION_ZSTD | ||
329 | |||
330 | static void fullscrape_make_zstd(int taskid, ot_tasktype mode) { | ||
331 | int bucket; | ||
332 | char *r; | ||
333 | struct iovec iovector = {NULL, 0}; | ||
334 | ZSTD_CCtx *zstream = ZSTD_createCCtx(); | ||
335 | ZSTD_inBuffer inbuf; | ||
336 | ZSTD_outBuffer outbuf; | ||
337 | size_t more_bytes; | ||
338 | |||
339 | if (!zstream) | ||
340 | return; | ||
341 | |||
342 | /* Setup return vector... */ | ||
343 | iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); | ||
344 | if (!iovector.iov_base) { | ||
345 | ZSTD_freeCCtx(zstream); | ||
346 | return; | ||
347 | } | ||
348 | |||
349 | /* Working with a compression level 6 is half as fast as level 3, but | ||
350 | seems to be the last reasonable bump that's worth extra cpu */ | ||
351 | ZSTD_CCtx_setParameter(zstream, ZSTD_c_compressionLevel, 6); | ||
352 | |||
353 | outbuf.dst = iovector.iov_base; | ||
354 | outbuf.size = OT_SCRAPE_CHUNK_SIZE; | ||
355 | outbuf.pos = 0; | ||
356 | |||
357 | if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) { | ||
358 | inbuf.src = (const void *)"d5:filesd"; | ||
359 | inbuf.size = strlen("d5:filesd"); | ||
360 | inbuf.pos = 0; | ||
361 | ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_continue); | ||
362 | } | ||
363 | |||
364 | /* For each bucket... */ | ||
365 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { | ||
366 | /* Get exclusive access to that bucket */ | ||
367 | ot_vector *torrents_list = mutex_bucket_lock(bucket); | ||
368 | ot_torrent *torrents = (ot_torrent *)(torrents_list->data); | ||
369 | size_t i; | ||
370 | |||
371 | /* For each torrent in this bucket.. */ | ||
372 | for (i = 0; i < torrents_list->size; ++i) { | ||
373 | char compress_buffer[OT_SCRAPE_MAXENTRYLEN]; | ||
374 | r = fullscrape_write_one(mode, compress_buffer, torrents + i, &torrents[i].hash); | ||
375 | inbuf.src = compress_buffer; | ||
376 | inbuf.size = r - compress_buffer; | ||
377 | inbuf.pos = 0; | ||
378 | ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_continue); | ||
379 | |||
380 | /* Check if there still is enough buffer left */ | ||
381 | while (outbuf.pos + OT_SCRAPE_MAXENTRYLEN > outbuf.size) { | ||
382 | iovector.iov_len = outbuf.size; | ||
383 | |||
384 | if (mutex_workqueue_pushchunked(taskid, &iovector)) { | ||
385 | free(iovector.iov_base); | ||
386 | ZSTD_freeCCtx(zstream); | ||
387 | return mutex_bucket_unlock(bucket, 0); | ||
388 | } | ||
389 | /* Allocate a fresh output buffer */ | ||
390 | iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); | ||
391 | if (!iovector.iov_base) { | ||
392 | fprintf(stderr, "Out of memory trying to claim ouput buffer\n"); | ||
393 | ZSTD_freeCCtx(zstream); | ||
394 | return mutex_bucket_unlock(bucket, 0); | ||
395 | } | ||
396 | |||
397 | outbuf.dst = iovector.iov_base; | ||
398 | outbuf.size = OT_SCRAPE_CHUNK_SIZE; | ||
399 | outbuf.pos = 0; | ||
400 | |||
401 | ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_continue); | ||
402 | } | ||
403 | } | ||
404 | |||
405 | /* All torrents done: release lock on current bucket */ | ||
406 | mutex_bucket_unlock(bucket, 0); | ||
407 | |||
408 | /* Parent thread died? */ | ||
409 | if (!g_opentracker_running) | ||
410 | return; | ||
411 | } | ||
412 | |||
413 | if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) { | ||
414 | inbuf.src = (const void *)"ee"; | ||
415 | inbuf.size = strlen("ee"); | ||
416 | inbuf.pos = 0; | ||
417 | } | ||
418 | |||
419 | more_bytes = ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_end); | ||
420 | |||
421 | iovector.iov_len = outbuf.pos; | ||
422 | if (mutex_workqueue_pushchunked(taskid, &iovector)) { | ||
423 | free(iovector.iov_base); | ||
424 | ZSTD_freeCCtx(zstream); | ||
425 | return; | ||
426 | } | ||
427 | |||
428 | /* Check if there's a last batch of data in the zlib buffer */ | ||
429 | if (more_bytes) { | ||
430 | /* Allocate a fresh output buffer */ | ||
431 | iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); | ||
432 | |||
433 | if (!iovector.iov_base) { | ||
434 | fprintf(stderr, "Problem with iovec_fix_increase_or_free\n"); | ||
435 | ZSTD_freeCCtx(zstream); | ||
436 | return; | ||
437 | } | ||
438 | |||
439 | outbuf.dst = iovector.iov_base; | ||
440 | outbuf.size = OT_SCRAPE_CHUNK_SIZE; | ||
441 | outbuf.pos = 0; | ||
442 | |||
443 | ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_end); | ||
444 | |||
445 | /* Only pass the new buffer if there actually was some data left in the buffer */ | ||
446 | iovector.iov_len = outbuf.pos; | ||
447 | if (!iovector.iov_len || mutex_workqueue_pushchunked(taskid, &iovector)) | ||
448 | free(iovector.iov_base); | ||
449 | } | ||
450 | |||
451 | ZSTD_freeCCtx(zstream); | ||
452 | } | ||
453 | /* WANT_COMPRESSION_ZSTD */ | ||
454 | #endif | ||
455 | |||
281 | /* WANT_FULLSCRAPE */ | 456 | /* WANT_FULLSCRAPE */ |
282 | #endif | 457 | #endif |
283 | const char *g_version_fullscrape_c = "$Source$: $Revision$\n"; | ||
diff --git a/ot_fullscrape.h b/ot_fullscrape.h index 0f920ec..bbb2a3f 100644 --- a/ot_fullscrape.h +++ b/ot_fullscrape.h | |||
@@ -8,9 +8,11 @@ | |||
8 | 8 | ||
9 | #ifdef WANT_FULLSCRAPE | 9 | #ifdef WANT_FULLSCRAPE |
10 | 10 | ||
11 | void fullscrape_init( ); | 11 | #include "ot_mutex.h" |
12 | void fullscrape_deinit( ); | 12 | |
13 | void fullscrape_deliver( int64 sock, ot_tasktype tasktype ); | 13 | void fullscrape_init(); |
14 | void fullscrape_deinit(); | ||
15 | void fullscrape_deliver(int64 sock, ot_tasktype tasktype); | ||
14 | 16 | ||
15 | #else | 17 | #else |
16 | 18 | ||
@@ -4,530 +4,645 @@ | |||
4 | $id$ */ | 4 | $id$ */ |
5 | 5 | ||
6 | /* System */ | 6 | /* System */ |
7 | #include <sys/types.h> | ||
8 | #include <arpa/inet.h> | 7 | #include <arpa/inet.h> |
9 | #include <stdlib.h> | 8 | #include <pthread.h> |
9 | #define _GNU_SOURCE | ||
10 | #include <stdio.h> | 10 | #include <stdio.h> |
11 | #include <stdlib.h> | ||
11 | #include <string.h> | 12 | #include <string.h> |
13 | #include <sys/types.h> | ||
12 | #include <unistd.h> | 14 | #include <unistd.h> |
13 | #include <pthread.h> | ||
14 | 15 | ||
15 | /* Libowfat */ | 16 | /* Libowfat */ |
16 | #include "byte.h" | ||
17 | #include "array.h" | 17 | #include "array.h" |
18 | #include "byte.h" | ||
19 | #include "case.h" | ||
18 | #include "iob.h" | 20 | #include "iob.h" |
19 | #include "ip6.h" | 21 | #include "ip6.h" |
20 | #include "scan.h" | 22 | #include "scan.h" |
21 | #include "case.h" | ||
22 | 23 | ||
23 | /* Opentracker */ | 24 | /* Opentracker */ |
24 | #include "trackerlogic.h" | 25 | #include "ot_accesslist.h" |
25 | #include "ot_mutex.h" | 26 | #include "ot_fullscrape.h" |
26 | #include "ot_http.h" | 27 | #include "ot_http.h" |
27 | #include "ot_iovec.h" | 28 | #include "ot_iovec.h" |
28 | #include "scan_urlencoded_query.h" | 29 | #include "ot_mutex.h" |
29 | #include "ot_fullscrape.h" | ||
30 | #include "ot_stats.h" | 30 | #include "ot_stats.h" |
31 | #include "ot_accesslist.h" | 31 | #include "scan_urlencoded_query.h" |
32 | #include "trackerlogic.h" | ||
33 | |||
34 | #ifdef WANT_NO_AUTO_FREE | ||
35 | #define OT_IOB_INIT(B) bzero(B, sizeof(io_batch)) | ||
36 | #else | ||
37 | #define OT_IOB_INIT(B) iob_init_autofree(B, 0) | ||
38 | #endif | ||
32 | 39 | ||
33 | #define OT_MAXMULTISCRAPE_COUNT 64 | 40 | #define OT_MAXMULTISCRAPE_COUNT 64 |
34 | #define OT_BATCH_LIMIT (1024*1024*16) | 41 | #define OT_BATCH_LIMIT (1024 * 1024 * 16) |
35 | extern char *g_redirecturl; | 42 | extern char *g_redirecturl; |
36 | 43 | ||
37 | char *g_stats_path; | 44 | char *g_stats_path; |
38 | ssize_t g_stats_path_len; | 45 | ssize_t g_stats_path_len; |
39 | 46 | ||
40 | enum { | 47 | enum { SUCCESS_HTTP_HEADER_LENGTH = 80, SUCCESS_HTTP_SIZE_OFF = 17 }; |
41 | SUCCESS_HTTP_HEADER_LENGTH = 80, | ||
42 | SUCCESS_HTTP_HEADER_LENGTH_CONTENT_ENCODING = 32, | ||
43 | SUCCESS_HTTP_SIZE_OFF = 17 }; | ||
44 | 48 | ||
45 | static void http_senddata( const int64 sock, struct ot_workstruct *ws ) { | 49 | static void http_senddata(const int64 sock, struct ot_workstruct *ws) { |
46 | struct http_data *cookie = io_getcookie( sock ); | 50 | struct http_data *cookie = io_getcookie(sock); |
47 | ssize_t written_size; | 51 | ssize_t written_size; |
48 | 52 | ||
49 | if( !cookie ) { io_close(sock); return; } | 53 | if (!cookie) { |
54 | io_close(sock); | ||
55 | return; | ||
56 | } | ||
50 | 57 | ||
51 | /* whoever sends data is not interested in its input-array */ | 58 | /* whoever sends data is not interested in its input-array */ |
52 | if( ws->keep_alive && ws->header_size != ws->request_size ) { | 59 | if (ws->keep_alive && ws->header_size != ws->request_size) { |
53 | size_t rest = ws->request_size - ws->header_size; | 60 | size_t rest = ws->request_size - ws->header_size; |
54 | if( array_start(&cookie->request) ) { | 61 | if (array_start(&cookie->request)) { |
55 | memmove( array_start(&cookie->request), ws->request + ws->header_size, rest ); | 62 | memmove(array_start(&cookie->request), ws->request + ws->header_size, rest); |
56 | array_truncate( &cookie->request, 1, rest ); | 63 | array_truncate(&cookie->request, 1, rest); |
57 | } else | 64 | } else |
58 | array_catb(&cookie->request, ws->request + ws->header_size, rest ); | 65 | array_catb(&cookie->request, ws->request + ws->header_size, rest); |
59 | } else | 66 | } else |
60 | array_reset( &cookie->request ); | 67 | array_reset(&cookie->request); |
61 | 68 | ||
62 | written_size = write( sock, ws->reply, ws->reply_size ); | 69 | written_size = write(sock, ws->reply, ws->reply_size); |
63 | if( ( written_size < 0 ) || ( ( written_size == ws->reply_size ) && !ws->keep_alive ) ) { | 70 | if ((written_size < 0) || ((written_size == ws->reply_size) && !ws->keep_alive)) { |
64 | array_reset( &cookie->request ); | 71 | array_reset(&cookie->request); |
65 | free( cookie ); io_close( sock ); return; | 72 | free(cookie); |
73 | io_close(sock); | ||
74 | return; | ||
66 | } | 75 | } |
67 | 76 | ||
68 | if( written_size < ws->reply_size ) { | 77 | if (written_size < ws->reply_size) { |
69 | char * outbuf; | 78 | char *outbuf; |
70 | tai6464 t; | 79 | tai6464 t; |
71 | 80 | ||
72 | if( !( outbuf = malloc( ws->reply_size - written_size ) ) ) { | 81 | if (!(outbuf = malloc(ws->reply_size - written_size))) { |
73 | array_reset( &cookie->request ); | 82 | array_reset(&cookie->request); |
74 | free(cookie); io_close( sock ); | 83 | free(cookie); |
84 | io_close(sock); | ||
75 | return; | 85 | return; |
76 | } | 86 | } |
77 | 87 | ||
78 | memcpy( outbuf, ws->reply + written_size, ws->reply_size - written_size ); | 88 | memcpy(outbuf, ws->reply + written_size, ws->reply_size - written_size); |
79 | if ( !cookie->batch ) { | 89 | if (!cookie->batch) { |
80 | cookie->batch = malloc( sizeof(io_batch) ); | 90 | cookie->batch = malloc(sizeof(io_batch)); |
81 | memset( cookie->batch, 0, sizeof(io_batch) ); | 91 | OT_IOB_INIT(cookie->batch); |
82 | cookie->batches = 1; | 92 | cookie->batches = 1; |
83 | } | 93 | } |
84 | 94 | ||
85 | iob_addbuf_free( cookie->batch, outbuf, ws->reply_size - written_size ); | 95 | iob_addbuf_free(cookie->batch, outbuf, ws->reply_size - written_size); |
86 | 96 | ||
87 | /* writeable short data sockets just have a tcp timeout */ | 97 | /* writeable short data sockets just have a tcp timeout */ |
88 | if( !ws->keep_alive ) { | 98 | if (!ws->keep_alive) { |
89 | taia_uint( &t, 0 ); io_timeout( sock, t ); | 99 | taia_uint(&t, 0); |
90 | io_dontwantread( sock ); | 100 | io_timeout(sock, t); |
101 | io_dontwantread(sock); | ||
91 | } | 102 | } |
92 | io_wantwrite( sock ); | 103 | io_wantwrite(sock); |
93 | } | 104 | } |
94 | } | 105 | } |
95 | 106 | ||
96 | #define HTTPERROR_302 return http_issue_error( sock, ws, CODE_HTTPERROR_302 ) | 107 | #define HTTPERROR_302 return http_issue_error(sock, ws, CODE_HTTPERROR_302) |
97 | #define HTTPERROR_400 return http_issue_error( sock, ws, CODE_HTTPERROR_400 ) | 108 | #define HTTPERROR_400 return http_issue_error(sock, ws, CODE_HTTPERROR_400) |
98 | #define HTTPERROR_400_PARAM return http_issue_error( sock, ws, CODE_HTTPERROR_400_PARAM ) | 109 | #define HTTPERROR_400_PARAM return http_issue_error(sock, ws, CODE_HTTPERROR_400_PARAM) |
99 | #define HTTPERROR_400_COMPACT return http_issue_error( sock, ws, CODE_HTTPERROR_400_COMPACT ) | 110 | #define HTTPERROR_400_COMPACT return http_issue_error(sock, ws, CODE_HTTPERROR_400_COMPACT) |
100 | #define HTTPERROR_400_DOUBLEHASH return http_issue_error( sock, ws, CODE_HTTPERROR_400_PARAM ) | 111 | #define HTTPERROR_400_DOUBLEHASH return http_issue_error(sock, ws, CODE_HTTPERROR_400_PARAM) |
101 | #define HTTPERROR_402_NOTMODEST return http_issue_error( sock, ws, CODE_HTTPERROR_402_NOTMODEST ) | 112 | #define HTTPERROR_402_NOTMODEST return http_issue_error(sock, ws, CODE_HTTPERROR_402_NOTMODEST) |
102 | #define HTTPERROR_403_IP return http_issue_error( sock, ws, CODE_HTTPERROR_403_IP ) | 113 | #define HTTPERROR_403_IP return http_issue_error(sock, ws, CODE_HTTPERROR_403_IP) |
103 | #define HTTPERROR_404 return http_issue_error( sock, ws, CODE_HTTPERROR_404 ) | 114 | #define HTTPERROR_404 return http_issue_error(sock, ws, CODE_HTTPERROR_404) |
104 | #define HTTPERROR_500 return http_issue_error( sock, ws, CODE_HTTPERROR_500 ) | 115 | #define HTTPERROR_500 return http_issue_error(sock, ws, CODE_HTTPERROR_500) |
105 | ssize_t http_issue_error( const int64 sock, struct ot_workstruct *ws, int code ) { | 116 | ssize_t http_issue_error(const int64 sock, struct ot_workstruct *ws, int code) { |
106 | char *error_code[] = { "302 Found", "400 Invalid Request", "400 Invalid Request", "400 Invalid Request", "402 Payment Required", | 117 | char *error_code[] = {"302 Found", "400 Invalid Request", "400 Invalid Request", "400 Invalid Request", "402 Payment Required", |
107 | "403 Not Modest", "403 Access Denied", "404 Not Found", "500 Internal Server Error" }; | 118 | "403 Not Modest", "403 Access Denied", "404 Not Found", "500 Internal Server Error"}; |
108 | char *title = error_code[code]; | 119 | char *title = error_code[code]; |
109 | 120 | ||
110 | ws->reply = ws->outbuf; | 121 | ws->reply = ws->outbuf; |
111 | if( code == CODE_HTTPERROR_302 ) | 122 | if (code == CODE_HTTPERROR_302) |
112 | ws->reply_size = snprintf( ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 302 Found\r\nContent-Length: 0\r\nLocation: %s\r\n\r\n", g_redirecturl ); | 123 | ws->reply_size = snprintf(ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 302 Found\r\nContent-Length: 0\r\nLocation: %s\r\n\r\n", g_redirecturl); |
113 | else | 124 | else |
114 | ws->reply_size = snprintf( ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 %s\r\nContent-Type: text/html\r\nContent-Length: %zd\r\n\r\n<title>%s</title>\n", title, strlen(title)+16-4,title+4); | 125 | ws->reply_size = snprintf(ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 %s\r\nContent-Type: text/html\r\nContent-Length: %zd\r\n\r\n<title>%s</title>\n", title, |
126 | strlen(title) + 16 - 4, title + 4); | ||
115 | 127 | ||
116 | #ifdef _DEBUG_HTTPERROR | 128 | #ifdef _DEBUG_HTTPERROR |
117 | fprintf( stderr, "DEBUG: invalid request was: %s\n", ws->debugbuf ); | 129 | fprintf(stderr, "DEBUG: invalid request was: %s\n", ws->debugbuf); |
118 | #endif | 130 | #endif |
119 | stats_issue_event( EVENT_FAILED, FLAG_TCP, code ); | 131 | stats_issue_event(EVENT_FAILED, FLAG_TCP, code); |
120 | http_senddata( sock, ws ); | 132 | http_senddata(sock, ws); |
121 | return ws->reply_size = -2; | 133 | return ws->reply_size = -2; |
122 | } | 134 | } |
123 | 135 | ||
124 | ssize_t http_sendiovecdata( const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector ) { | 136 | ssize_t http_sendiovecdata(const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial) { |
125 | struct http_data *cookie = io_getcookie( sock ); | 137 | struct http_data *cookie = io_getcookie(sock); |
126 | char *header; | 138 | io_batch *current; |
127 | int i; | 139 | char *header; |
128 | size_t header_size, size = iovec_length( &iovec_entries, (const struct iovec **)&iovector ); | 140 | const char *encoding = ""; |
129 | tai6464 t; | 141 | int i; |
142 | size_t header_size, size = iovec_length(&iovec_entries, (const struct iovec **)&iovector); | ||
143 | tai6464 t; | ||
130 | 144 | ||
131 | /* No cookie? Bad socket. Leave. */ | 145 | /* No cookie? Bad socket. Leave. */ |
132 | if( !cookie ) { | 146 | if (!cookie) { |
133 | iovec_free( &iovec_entries, &iovector ); | 147 | iovec_free(&iovec_entries, &iovector); |
134 | HTTPERROR_500; | 148 | HTTPERROR_500; |
135 | } | 149 | } |
136 | 150 | ||
137 | /* If this socket collected request in a buffer, free it now */ | 151 | /* If this socket collected request in a buffer, free it now */ |
138 | array_reset( &cookie->request ); | 152 | array_reset(&cookie->request); |
139 | 153 | ||
140 | /* If we came here, wait for the answer is over */ | 154 | /* If we came here, wait for the answer is over */ |
141 | cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK; | 155 | if (cookie->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK) { |
142 | 156 | io_dontwantread(sock); | |
143 | /* Our answers never are 0 vectors. Return an error. */ | 157 | cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK; |
144 | if( !iovec_entries ) { | ||
145 | HTTPERROR_500; | ||
146 | } | 158 | } |
147 | 159 | ||
148 | /* Prepare space for http header */ | 160 | if (iovec_entries) { |
149 | header = malloc( SUCCESS_HTTP_HEADER_LENGTH + SUCCESS_HTTP_HEADER_LENGTH_CONTENT_ENCODING ); | 161 | |
150 | if( !header ) { | 162 | if (cookie->flag & STRUCT_HTTP_FLAG_ZSTD) |
151 | iovec_free( &iovec_entries, &iovector ); | 163 | encoding = "Content-Encoding: zstd\r\n"; |
152 | HTTPERROR_500; | 164 | else if (cookie->flag & STRUCT_HTTP_FLAG_GZIP) |
153 | } | 165 | encoding = "Content-Encoding: gzip\r\n"; |
154 | 166 | else if (cookie->flag & STRUCT_HTTP_FLAG_BZIP2) | |
155 | if( cookie->flag & STRUCT_HTTP_FLAG_GZIP ) | 167 | encoding = "Content-Encoding: bzip2\r\n"; |
156 | header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: gzip\r\nContent-Length: %zd\r\n\r\n", size ); | 168 | |
157 | else if( cookie->flag & STRUCT_HTTP_FLAG_BZIP2 ) | 169 | if (!(cookie->flag & STRUCT_HTTP_FLAG_CHUNKED)) |
158 | header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: bzip2\r\nContent-Length: %zd\r\n\r\n", size ); | 170 | header_size = asprintf(&header, "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n%sContent-Length: %zd\r\n\r\n", encoding, size); |
159 | else | 171 | else { |
160 | header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r\n", size ); | 172 | if (!(cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER)) { |
173 | header_size = | ||
174 | asprintf(&header, "HTTP/1.1 200 OK\r\nContent-Type: application/octet-stream\r\n%sTransfer-Encoding: chunked\r\n\r\n%zx\r\n", encoding, size); | ||
175 | cookie->flag |= STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER; | ||
176 | } else | ||
177 | header_size = asprintf(&header, "%zx\r\n", size); | ||
178 | } | ||
179 | if (!header) { | ||
180 | iovec_free(&iovec_entries, &iovector); | ||
181 | HTTPERROR_500; | ||
182 | } | ||
161 | 183 | ||
162 | if (!cookie->batch ) { | 184 | if (!cookie->batch) { |
163 | cookie->batch = malloc( sizeof(io_batch) ); | 185 | cookie->batch = malloc(sizeof(io_batch)); |
164 | memset( cookie->batch, 0, sizeof(io_batch) ); | 186 | if (!cookie->batch) { |
165 | cookie->batches = 1; | 187 | free(header); |
166 | } | 188 | iovec_free(&iovec_entries, &iovector); |
167 | iob_addbuf_free( cookie->batch, header, header_size ); | 189 | HTTPERROR_500; |
168 | 190 | } | |
169 | /* Split huge iovectors into separate io_batches */ | 191 | OT_IOB_INIT(cookie->batch); |
170 | for( i=0; i<iovec_entries; ++i ) { | 192 | cookie->batches = 1; |
171 | io_batch *current = cookie->batch + cookie->batches - 1; | 193 | } |
172 | 194 | current = cookie->batch + cookie->batches - 1; | |
173 | /* If the current batch's limit is reached, try to reallocate a new batch to work on */ | 195 | iob_addbuf_free(current, header, header_size); |
174 | if( current->bytesleft > OT_BATCH_LIMIT ) { | 196 | |
175 | io_batch * new_batch = realloc( current, (cookie->batches + 1) * sizeof(io_batch) ); | 197 | /* Split huge iovectors into separate io_batches */ |
176 | if( new_batch ) { | 198 | for (i = 0; i < iovec_entries; ++i) { |
177 | cookie->batches++; | 199 | /* If the current batch's limit is reached, try to reallocate a new batch to work on */ |
178 | current = cookie->batch = new_batch; | 200 | if (current->bytesleft > OT_BATCH_LIMIT) { |
179 | memset( current, 0, sizeof(io_batch) ); | 201 | io_batch *new_batch = realloc(cookie->batch, (cookie->batches + 1) * sizeof(io_batch)); |
202 | if (new_batch) { | ||
203 | cookie->batch = new_batch; | ||
204 | current = cookie->batch + cookie->batches++; | ||
205 | OT_IOB_INIT(current); | ||
180 | } | 206 | } |
207 | } | ||
208 | iob_addbuf_free(current, iovector[i].iov_base, iovector[i].iov_len); | ||
181 | } | 209 | } |
210 | free(iovector); | ||
211 | if (cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER) | ||
212 | iob_addbuf(current, "\r\n", 2); | ||
213 | } | ||
182 | 214 | ||
183 | iob_addbuf_free( current, iovector[i].iov_base, iovector[i].iov_len ); | 215 | if ((cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER) && cookie->batch && !is_partial) { |
216 | current = cookie->batch + cookie->batches - 1; | ||
217 | iob_addbuf(current, "0\r\n\r\n", 5); | ||
218 | cookie->flag &= ~STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER; | ||
184 | } | 219 | } |
185 | free( iovector ); | ||
186 | 220 | ||
187 | /* writeable sockets timeout after 10 minutes */ | 221 | /* writeable sockets timeout after 10 minutes */ |
188 | taia_now( &t ); taia_addsec( &t, &t, OT_CLIENT_TIMEOUT_SEND ); | 222 | taia_now(&t); |
189 | io_timeout( sock, t ); | 223 | taia_addsec(&t, &t, OT_CLIENT_TIMEOUT_SEND); |
190 | io_dontwantread( sock ); | 224 | io_timeout(sock, t); |
191 | io_wantwrite( sock ); | 225 | io_wantwrite(sock); |
192 | return 0; | 226 | return 0; |
193 | } | 227 | } |
194 | 228 | ||
195 | static ssize_t http_handle_stats( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) { | 229 | static ssize_t http_handle_stats(const int64 sock, struct ot_workstruct *ws, char *read_ptr) { |
196 | static const ot_keywords keywords_main[] = | 230 | static const ot_keywords keywords_main[] = {{"mode", 1}, {"format", 2}, {"info_hash", 3}, {NULL, -3}}; |
197 | { { "mode", 1 }, {"format", 2 }, { NULL, -3 } }; | 231 | static const ot_keywords keywords_mode[] = {{"peer", TASK_STATS_PEERS}, |
198 | static const ot_keywords keywords_mode[] = | 232 | {"conn", TASK_STATS_CONNS}, |
199 | { { "peer", TASK_STATS_PEERS }, { "conn", TASK_STATS_CONNS }, { "scrp", TASK_STATS_SCRAPE }, { "udp4", TASK_STATS_UDP }, { "tcp4", TASK_STATS_TCP }, | 233 | {"scrp", TASK_STATS_SCRAPE}, |
200 | { "busy", TASK_STATS_BUSY_NETWORKS }, { "torr", TASK_STATS_TORRENTS }, { "fscr", TASK_STATS_FULLSCRAPE }, | 234 | {"udp4", TASK_STATS_UDP}, |
201 | { "s24s", TASK_STATS_SLASH24S }, { "tpbs", TASK_STATS_TPB }, { "herr", TASK_STATS_HTTPERRORS }, { "completed", TASK_STATS_COMPLETED }, | 235 | {"tcp4", TASK_STATS_TCP}, |
202 | { "top100", TASK_STATS_TOP100 }, { "top10", TASK_STATS_TOP10 }, { "renew", TASK_STATS_RENEW }, { "syncs", TASK_STATS_SYNCS }, { "version", TASK_STATS_VERSION }, | 236 | {"busy", TASK_STATS_BUSY_NETWORKS}, |
203 | { "everything", TASK_STATS_EVERYTHING }, { "statedump", TASK_FULLSCRAPE_TRACKERSTATE }, { "fulllog", TASK_STATS_FULLLOG }, | 237 | {"torr", TASK_STATS_TORRENTS}, |
204 | { "woodpeckers", TASK_STATS_WOODPECKERS}, | 238 | {"fscr", TASK_STATS_FULLSCRAPE}, |
239 | {"s24s", TASK_STATS_SLASH24S}, | ||
240 | {"tpbs", TASK_STATS_TPB}, | ||
241 | {"herr", TASK_STATS_HTTPERRORS}, | ||
242 | {"completed", TASK_STATS_COMPLETED}, | ||
243 | {"top100", TASK_STATS_TOP100}, | ||
244 | {"top10", TASK_STATS_TOP10}, | ||
245 | {"renew", TASK_STATS_RENEW}, | ||
246 | {"syncs", TASK_STATS_SYNCS}, | ||
247 | {"version", TASK_STATS_VERSION}, | ||
248 | {"everything", TASK_STATS_EVERYTHING}, | ||
249 | {"statedump", TASK_FULLSCRAPE_TRACKERSTATE}, | ||
250 | {"fulllog", TASK_STATS_FULLLOG}, | ||
251 | {"woodpeckers", TASK_STATS_WOODPECKERS}, | ||
205 | #ifdef WANT_LOG_NUMWANT | 252 | #ifdef WANT_LOG_NUMWANT |
206 | { "numwants", TASK_STATS_NUMWANTS}, | 253 | {"numwants", TASK_STATS_NUMWANTS}, |
207 | #endif | 254 | #endif |
208 | { NULL, -3 } }; | 255 | {NULL, -3}}; |
209 | static const ot_keywords keywords_format[] = | 256 | static const ot_keywords keywords_format[] = {{"bin", TASK_FULLSCRAPE_TPB_BINARY}, {"ben", TASK_FULLSCRAPE}, |
210 | { { "bin", TASK_FULLSCRAPE_TPB_BINARY }, { "ben", TASK_FULLSCRAPE }, { "url", TASK_FULLSCRAPE_TPB_URLENCODED }, | 257 | {"url", TASK_FULLSCRAPE_TPB_URLENCODED}, {"txt", TASK_FULLSCRAPE_TPB_ASCII}, |
211 | { "txt", TASK_FULLSCRAPE_TPB_ASCII }, { "txtp", TASK_FULLSCRAPE_TPB_ASCII_PLUS }, { NULL, -3 } }; | 258 | {"txtp", TASK_FULLSCRAPE_TPB_ASCII_PLUS}, {NULL, -3}}; |
212 | 259 | ||
213 | int mode = TASK_STATS_PEERS, scanon = 1, format = 0; | 260 | int mode = TASK_STATS_PEERS, scanon = 1, format = 0; |
214 | 261 | ||
215 | #ifdef WANT_RESTRICT_STATS | 262 | #ifdef WANT_RESTRICT_STATS |
216 | struct http_data *cookie = io_getcookie( sock ); | 263 | struct http_data *cookie = io_getcookie(sock); |
217 | 264 | ||
218 | if( !cookie || !accesslist_isblessed( cookie->ip, OT_PERMISSION_MAY_STAT ) ) | 265 | if (!cookie || !accesslist_is_blessed(cookie->ip, OT_PERMISSION_MAY_STAT)) |
219 | HTTPERROR_403_IP; | 266 | HTTPERROR_403_IP; |
220 | #endif | 267 | #endif |
221 | 268 | ||
222 | while( scanon ) { | 269 | while (scanon) { |
223 | switch( scan_find_keywords( keywords_main, &read_ptr, SCAN_SEARCHPATH_PARAM ) ) { | 270 | switch (scan_find_keywords(keywords_main, &read_ptr, SCAN_SEARCHPATH_PARAM)) { |
224 | case -2: scanon = 0; break; /* TERMINATOR */ | 271 | case -2: |
225 | case -1: HTTPERROR_400_PARAM; /* PARSE ERROR */ | 272 | scanon = 0; |
226 | case -3: scan_urlencoded_skipvalue( &read_ptr ); break; | 273 | break; /* TERMINATOR */ |
227 | case 1: /* matched "mode" */ | 274 | case -1: |
228 | if( ( mode = scan_find_keywords( keywords_mode, &read_ptr, SCAN_SEARCHPATH_VALUE ) ) <= 0 ) HTTPERROR_400_PARAM; | 275 | HTTPERROR_400_PARAM; /* PARSE ERROR */ |
276 | case -3: | ||
277 | scan_urlencoded_skipvalue(&read_ptr); | ||
278 | break; | ||
279 | case 1: /* matched "mode" */ | ||
280 | if ((mode = scan_find_keywords(keywords_mode, &read_ptr, SCAN_SEARCHPATH_VALUE)) <= 0) | ||
281 | HTTPERROR_400_PARAM; | ||
229 | break; | 282 | break; |
230 | case 2: /* matched "format" */ | 283 | case 2: /* matched "format" */ |
231 | if( ( format = scan_find_keywords( keywords_format, &read_ptr, SCAN_SEARCHPATH_VALUE ) ) <= 0 ) HTTPERROR_400_PARAM; | 284 | if ((format = scan_find_keywords(keywords_format, &read_ptr, SCAN_SEARCHPATH_VALUE)) <= 0) |
285 | HTTPERROR_400_PARAM; | ||
232 | break; | 286 | break; |
287 | case 3: | ||
288 | HTTPERROR_400_PARAM; /* If the stats URL was mistakenly added as announce URL, return a 400 */ | ||
233 | } | 289 | } |
234 | } | 290 | } |
235 | 291 | ||
236 | #ifdef WANT_FULLSCRAPE | 292 | #ifdef WANT_FULLSCRAPE |
237 | if( mode == TASK_FULLSCRAPE_TRACKERSTATE ) { | 293 | if (mode == TASK_FULLSCRAPE_TRACKERSTATE) { |
238 | format = mode; mode = TASK_STATS_TPB; | 294 | format = mode; |
295 | mode = TASK_STATS_TPB; | ||
239 | } | 296 | } |
240 | 297 | ||
241 | if( mode == TASK_STATS_TPB ) { | 298 | if (mode == TASK_STATS_TPB) { |
242 | struct http_data* cookie = io_getcookie( sock ); | 299 | struct http_data *cookie = io_getcookie(sock); |
243 | tai6464 t; | 300 | tai6464 t; |
244 | #ifdef WANT_COMPRESSION_GZIP | 301 | #ifdef WANT_COMPRESSION_GZIP |
245 | ws->request[ws->request_size] = 0; | 302 | ws->request[ws->request_size] = 0; |
246 | #ifdef WANT_COMPRESSION_GZIP_ALWAYS | 303 | #ifndef WANT_COMPRESSION_GZIP_ALWAYS |
247 | if( strstr( read_ptr - 1, "gzip" ) ) { | 304 | if (strstr(read_ptr - 1, "gzip")) { |
248 | #endif | 305 | #endif |
249 | cookie->flag |= STRUCT_HTTP_FLAG_GZIP; | 306 | cookie->flag |= STRUCT_HTTP_FLAG_GZIP; |
250 | format |= TASK_FLAG_GZIP; | 307 | format |= TASK_FLAG_GZIP; |
251 | #ifdef WANT_COMPRESSION_GZIP_ALWAYS | 308 | #ifndef WANT_COMPRESSION_GZIP_ALWAYS |
252 | } | 309 | } |
253 | #endif | 310 | #endif |
254 | #endif | 311 | #endif |
255 | /* Pass this task to the worker thread */ | 312 | /* Pass this task to the worker thread */ |
256 | cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK; | 313 | cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED; |
257 | 314 | ||
258 | /* Clients waiting for us should not easily timeout */ | 315 | /* Clients waiting for us should not easily timeout */ |
259 | taia_uint( &t, 0 ); io_timeout( sock, t ); | 316 | taia_uint(&t, 0); |
260 | fullscrape_deliver( sock, format ); | 317 | io_timeout(sock, t); |
261 | io_dontwantread( sock ); | 318 | fullscrape_deliver(sock, format); |
319 | io_dontwantread(sock); | ||
262 | return ws->reply_size = -2; | 320 | return ws->reply_size = -2; |
263 | } | 321 | } |
264 | #endif | 322 | #endif |
265 | 323 | ||
266 | /* default format for now */ | 324 | /* default format for now */ |
267 | if( ( mode & TASK_CLASS_MASK ) == TASK_STATS ) { | 325 | if ((mode & TASK_CLASS_MASK) == TASK_STATS) { |
268 | tai6464 t; | 326 | tai6464 t; |
269 | /* Complex stats also include expensive memory debugging tools */ | 327 | /* Complex stats also include expensive memory debugging tools */ |
270 | taia_uint( &t, 0 ); io_timeout( sock, t ); | 328 | taia_uint(&t, 0); |
271 | stats_deliver( sock, mode ); | 329 | io_timeout(sock, t); |
330 | stats_deliver(sock, mode); | ||
272 | return ws->reply_size = -2; | 331 | return ws->reply_size = -2; |
273 | } | 332 | } |
274 | 333 | ||
275 | /* Simple stats can be answerred immediately */ | 334 | /* Simple stats can be answerred immediately */ |
276 | return ws->reply_size = return_stats_for_tracker( ws->reply, mode, 0 ); | 335 | return ws->reply_size = return_stats_for_tracker(ws->reply, mode, 0); |
277 | } | 336 | } |
278 | 337 | ||
279 | #ifdef WANT_MODEST_FULLSCRAPES | 338 | #ifdef WANT_MODEST_FULLSCRAPES |
280 | static pthread_mutex_t g_modest_fullscrape_mutex = PTHREAD_MUTEX_INITIALIZER; | 339 | static pthread_mutex_t g_modest_fullscrape_mutex = PTHREAD_MUTEX_INITIALIZER; |
281 | static ot_vector g_modest_fullscrape_timeouts; | 340 | static ot_vector g_modest_fullscrape_timeouts; |
282 | typedef struct { ot_ip6 ip; ot_time last_fullscrape; } ot_scrape_log; | 341 | typedef struct { |
342 | ot_ip6 ip; | ||
343 | ot_time last_fullscrape; | ||
344 | } ot_scrape_log; | ||
283 | #endif | 345 | #endif |
284 | 346 | ||
285 | #ifdef WANT_FULLSCRAPE | 347 | #ifdef WANT_FULLSCRAPE |
286 | static ssize_t http_handle_fullscrape( const int64 sock, struct ot_workstruct *ws ) { | 348 | static ssize_t http_handle_fullscrape(const int64 sock, struct ot_workstruct *ws) { |
287 | struct http_data* cookie = io_getcookie( sock ); | 349 | struct http_data *cookie = io_getcookie(sock); |
288 | int format = 0; | 350 | int format = 0; |
289 | tai6464 t; | 351 | tai6464 t; |
290 | 352 | ||
291 | #ifdef WANT_MODEST_FULLSCRAPES | 353 | #ifdef WANT_MODEST_FULLSCRAPES |
292 | { | 354 | { |
293 | ot_scrape_log this_peer, *new_peer; | 355 | ot_scrape_log this_peer, *new_peer; |
294 | int exactmatch; | 356 | int exactmatch; |
295 | memcpy( this_peer.ip, cookie->ip, sizeof(ot_ip6)); | 357 | memcpy(this_peer.ip, cookie->ip, sizeof(ot_ip6)); |
296 | this_peer.last_fullscrape = g_now_seconds; | 358 | this_peer.last_fullscrape = g_now_seconds; |
297 | pthread_mutex_lock(&g_modest_fullscrape_mutex); | 359 | pthread_mutex_lock(&g_modest_fullscrape_mutex); |
298 | new_peer = vector_find_or_insert( &g_modest_fullscrape_timeouts, &this_peer, sizeof(ot_scrape_log), sizeof(ot_ip6), &exactmatch ); | 360 | new_peer = vector_find_or_insert(&g_modest_fullscrape_timeouts, &this_peer, sizeof(ot_scrape_log), sizeof(ot_ip6), &exactmatch); |
299 | if( !new_peer ) { | 361 | if (!new_peer) { |
300 | pthread_mutex_unlock(&g_modest_fullscrape_mutex); | 362 | pthread_mutex_unlock(&g_modest_fullscrape_mutex); |
301 | HTTPERROR_500; | 363 | HTTPERROR_500; |
302 | } | 364 | } |
303 | if( exactmatch && ( this_peer.last_fullscrape - new_peer->last_fullscrape ) < OT_MODEST_PEER_TIMEOUT ) { | 365 | if (exactmatch && (this_peer.last_fullscrape - new_peer->last_fullscrape) < OT_MODEST_PEER_TIMEOUT) { |
304 | pthread_mutex_unlock(&g_modest_fullscrape_mutex); | 366 | pthread_mutex_unlock(&g_modest_fullscrape_mutex); |
305 | HTTPERROR_402_NOTMODEST; | 367 | HTTPERROR_402_NOTMODEST; |
306 | } | 368 | } |
307 | memcpy( new_peer, &this_peer, sizeof(ot_scrape_log)); | 369 | memcpy(new_peer, &this_peer, sizeof(ot_scrape_log)); |
308 | pthread_mutex_unlock(&g_modest_fullscrape_mutex); | 370 | pthread_mutex_unlock(&g_modest_fullscrape_mutex); |
309 | } | 371 | } |
310 | #endif | 372 | #endif |
311 | 373 | ||
374 | |||
375 | #if defined(WANT_COMPRESSION_GZIP) || defined(WANT_COMPRESSION_ZSTD) | ||
376 | ws->request[ws->request_size - 1] = 0; | ||
312 | #ifdef WANT_COMPRESSION_GZIP | 377 | #ifdef WANT_COMPRESSION_GZIP |
313 | ws->request[ws->request_size-1] = 0; | 378 | if (strstr(ws->request, "gzip")) { |
314 | if( strstr( ws->request, "gzip" ) ) { | ||
315 | cookie->flag |= STRUCT_HTTP_FLAG_GZIP; | 379 | cookie->flag |= STRUCT_HTTP_FLAG_GZIP; |
316 | format = TASK_FLAG_GZIP; | 380 | format |= TASK_FLAG_GZIP; |
317 | stats_issue_event( EVENT_FULLSCRAPE_REQUEST_GZIP, 0, (uintptr_t)cookie->ip ); | 381 | } |
318 | } else | 382 | #endif |
383 | #ifdef WANT_COMPRESSION_ZSTD | ||
384 | if (strstr(ws->request, "zstd")) { | ||
385 | cookie->flag |= STRUCT_HTTP_FLAG_ZSTD; | ||
386 | format |= TASK_FLAG_ZSTD; | ||
387 | } | ||
388 | #endif | ||
389 | |||
390 | #if defined(WANT_COMPRESSION_ZSTD) && defined(WANT_COMPRESSION_ZSTD_ALWAYS) | ||
391 | cookie->flag |= STRUCT_HTTP_FLAG_ZSTD; | ||
392 | format |= TASK_FLAG_ZSTD; | ||
319 | #endif | 393 | #endif |
320 | stats_issue_event( EVENT_FULLSCRAPE_REQUEST, 0, (uintptr_t)cookie->ip ); | 394 | |
395 | #if defined(WANT_COMPRESSION_GZIP) && defined(WANT_COMPRESSION_GZIP_ALWAYS) | ||
396 | cookie->flag |= STRUCT_HTTP_FLAG_GZIP; | ||
397 | format |= TASK_FLAG_GZIP; | ||
398 | #endif | ||
399 | #endif | ||
400 | |||
401 | stats_issue_event(EVENT_FULLSCRAPE_REQUEST, 0, (uintptr_t)cookie->ip); | ||
321 | 402 | ||
322 | #ifdef _DEBUG_HTTPERROR | 403 | #ifdef _DEBUG_HTTPERROR |
323 | fprintf( stderr, "%s", ws->debugbuf ); | 404 | fprintf(stderr, "%s", ws->debugbuf); |
324 | #endif | 405 | #endif |
325 | 406 | ||
326 | /* Pass this task to the worker thread */ | 407 | /* Pass this task to the worker thread */ |
327 | cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK; | 408 | cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED; |
328 | /* Clients waiting for us should not easily timeout */ | 409 | /* Clients waiting for us should not easily timeout */ |
329 | taia_uint( &t, 0 ); io_timeout( sock, t ); | 410 | taia_uint(&t, 0); |
330 | fullscrape_deliver( sock, TASK_FULLSCRAPE | format ); | 411 | io_timeout(sock, t); |
331 | io_dontwantread( sock ); | 412 | fullscrape_deliver(sock, TASK_FULLSCRAPE | format); |
413 | io_dontwantread(sock); | ||
332 | return ws->reply_size = -2; | 414 | return ws->reply_size = -2; |
333 | } | 415 | } |
334 | #endif | 416 | #endif |
335 | 417 | ||
336 | static ssize_t http_handle_scrape( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) { | 418 | static ssize_t http_handle_scrape(const int64 sock, struct ot_workstruct *ws, char *read_ptr) { |
337 | static const ot_keywords keywords_scrape[] = { { "info_hash", 1 }, { NULL, -3 } }; | 419 | static const ot_keywords keywords_scrape[] = {{"info_hash", 1}, {NULL, -3}}; |
338 | 420 | ||
339 | ot_hash * multiscrape_buf = (ot_hash*)ws->request; | 421 | ot_hash *multiscrape_buf = (ot_hash *)ws->request; |
340 | int scanon = 1, numwant = 0; | 422 | int scanon = 1, numwant = 0; |
341 | 423 | ||
342 | /* This is to hack around stupid clients that send "scrape ?info_hash" */ | 424 | /* This is to hack around stupid clients that send "scrape ?info_hash" */ |
343 | if( read_ptr[-1] != '?' ) { | 425 | if (read_ptr[-1] != '?') { |
344 | while( ( *read_ptr != '?' ) && ( *read_ptr != '\n' ) ) ++read_ptr; | 426 | while ((*read_ptr != '?') && (*read_ptr != '\n')) |
345 | if( *read_ptr == '\n' ) HTTPERROR_400_PARAM; | 427 | ++read_ptr; |
428 | if (*read_ptr == '\n') | ||
429 | HTTPERROR_400_PARAM; | ||
346 | ++read_ptr; | 430 | ++read_ptr; |
347 | } | 431 | } |
348 | 432 | ||
349 | while( scanon ) { | 433 | while (scanon) { |
350 | switch( scan_find_keywords( keywords_scrape, &read_ptr, SCAN_SEARCHPATH_PARAM ) ) { | 434 | switch (scan_find_keywords(keywords_scrape, &read_ptr, SCAN_SEARCHPATH_PARAM)) { |
351 | case -2: scanon = 0; break; /* TERMINATOR */ | 435 | case -2: |
352 | default: HTTPERROR_400_PARAM; /* PARSE ERROR */ | 436 | scanon = 0; |
353 | case -3: scan_urlencoded_skipvalue( &read_ptr ); break; | 437 | break; /* TERMINATOR */ |
354 | case 1: /* matched "info_hash" */ | 438 | default: |
439 | HTTPERROR_400_PARAM; /* PARSE ERROR */ | ||
440 | case -3: | ||
441 | scan_urlencoded_skipvalue(&read_ptr); | ||
442 | break; | ||
443 | case 1: /* matched "info_hash" */ | ||
355 | /* ignore this, when we have less than 20 bytes */ | 444 | /* ignore this, when we have less than 20 bytes */ |
356 | if( scan_urlencoded_query( &read_ptr, (char*)(multiscrape_buf + numwant++), SCAN_SEARCHPATH_VALUE ) != (ssize_t)sizeof(ot_hash) ) | 445 | if (scan_urlencoded_query(&read_ptr, (char *)(multiscrape_buf + numwant++), SCAN_SEARCHPATH_VALUE) != (ssize_t)sizeof(ot_hash)) |
357 | HTTPERROR_400_PARAM; | 446 | HTTPERROR_400_PARAM; |
358 | break; | 447 | break; |
359 | } | 448 | } |
360 | } | 449 | } |
361 | 450 | ||
362 | /* No info_hash found? Inform user */ | 451 | /* No info_hash found? Inform user */ |
363 | if( !numwant ) HTTPERROR_400_PARAM; | 452 | if (!numwant) |
453 | HTTPERROR_400_PARAM; | ||
364 | 454 | ||
365 | /* Limit number of hashes to process */ | 455 | /* Limit number of hashes to process */ |
366 | if( numwant > OT_MAXMULTISCRAPE_COUNT ) | 456 | if (numwant > OT_MAXMULTISCRAPE_COUNT) |
367 | numwant = OT_MAXMULTISCRAPE_COUNT; | 457 | numwant = OT_MAXMULTISCRAPE_COUNT; |
368 | 458 | ||
369 | /* Enough for http header + whole scrape string */ | 459 | /* Enough for http header + whole scrape string */ |
370 | ws->reply_size = return_tcp_scrape_for_torrent( multiscrape_buf, numwant, ws->reply ); | 460 | ws->reply_size = return_tcp_scrape_for_torrent((const ot_hash *)multiscrape_buf, numwant, ws->reply); |
371 | stats_issue_event( EVENT_SCRAPE, FLAG_TCP, ws->reply_size ); | 461 | stats_issue_event(EVENT_SCRAPE, FLAG_TCP, ws->reply_size); |
372 | return ws->reply_size; | 462 | return ws->reply_size; |
373 | } | 463 | } |
374 | 464 | ||
375 | #ifdef WANT_LOG_NUMWANT | 465 | #ifdef WANT_LOG_NUMWANT |
376 | unsigned long long numwants[201]; | 466 | unsigned long long numwants[201]; |
377 | #endif | 467 | #endif |
378 | 468 | ||
379 | #if defined( WANT_KEEPALIVE ) || defined( WANT_IP_FROM_PROXY ) | 469 | #if defined(WANT_KEEPALIVE) || defined(WANT_IP_FROM_PROXY) |
380 | static char* http_header( char *data, size_t byte_count, char *header ) { | 470 | static char *http_header(char *data, size_t byte_count, char *header) { |
381 | size_t i; | 471 | size_t i; |
382 | long sl = strlen( header ); | 472 | long sl = strlen(header); |
383 | for( i = 0; i + sl + 2 < byte_count; ++i ) { | 473 | for (i = 0; i + sl + 2 < byte_count; ++i) { |
384 | if( data[i] != '\n' || data[ i + sl + 1] != ':' ) continue; | 474 | if (data[i] != '\n' || data[i + sl + 1] != ':') |
385 | if( !case_equalb( data + i + 1, sl, header ) ) continue; | 475 | continue; |
476 | if (!case_equalb(data + i + 1, sl, header)) | ||
477 | continue; | ||
386 | data += i + sl + 2; | 478 | data += i + sl + 2; |
387 | while( *data == ' ' || *data == '\t' ) ++data; | 479 | while (*data == ' ' || *data == '\t') |
480 | ++data; | ||
388 | return data; | 481 | return data; |
389 | } | 482 | } |
390 | return 0; | 483 | return 0; |
391 | } | 484 | } |
392 | #endif | 485 | #endif |
393 | 486 | ||
394 | static ot_keywords keywords_announce[] = { { "port", 1 }, { "left", 2 }, { "event", 3 }, { "numwant", 4 }, { "compact", 5 }, { "compact6", 5 }, { "info_hash", 6 }, | 487 | static ot_keywords keywords_announce[] = {{"port", 1}, {"left", 2}, {"event", 3}, {"numwant", 4}, {"compact", 5}, {"compact6", 5}, {"info_hash", 6}, |
395 | #ifdef WANT_IP_FROM_QUERY_STRING | 488 | #ifdef WANT_IP_FROM_QUERY_STRING |
396 | { "ip", 7 }, | 489 | {"ip", 7}, |
397 | #endif | 490 | #endif |
398 | #ifdef WANT_FULLLOG_NETWORKS | 491 | #ifdef WANT_FULLLOG_NETWORKS |
399 | { "lognet", 8 }, | 492 | {"lognet", 8}, |
400 | #endif | 493 | #endif |
401 | { "peer_id", 9 }, | 494 | {"peer_id", 9}, {NULL, -3}}; |
402 | { NULL, -3 } }; | 495 | static ot_keywords keywords_announce_event[] = {{"completed", 1}, {"stopped", 2}, {NULL, -3}}; |
403 | static ot_keywords keywords_announce_event[] = { { "completed", 1 }, { "stopped", 2 }, { NULL, -3 } }; | 496 | static ssize_t http_handle_announce(const int64 sock, struct ot_workstruct *ws, char *read_ptr) { |
404 | static ssize_t http_handle_announce( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) { | ||
405 | int numwant, tmp, scanon; | 497 | int numwant, tmp, scanon; |
406 | unsigned short port = 0; | 498 | unsigned short port = 0; |
407 | char *write_ptr; | 499 | char *write_ptr; |
408 | ssize_t len; | 500 | ssize_t len; |
409 | struct http_data *cookie = io_getcookie( sock ); | 501 | struct http_data *cookie = io_getcookie(sock); |
410 | 502 | ||
411 | /* This is to hack around stupid clients that send "announce ?info_hash" */ | 503 | /* This is to hack around stupid clients that send "announce ?info_hash" */ |
412 | if( read_ptr[-1] != '?' ) { | 504 | if (read_ptr[-1] != '?') { |
413 | while( ( *read_ptr != '?' ) && ( *read_ptr != '\n' ) ) ++read_ptr; | 505 | while ((*read_ptr != '?') && (*read_ptr != '\n')) |
414 | if( *read_ptr == '\n' ) HTTPERROR_400_PARAM; | 506 | ++read_ptr; |
507 | if (*read_ptr == '\n') | ||
508 | HTTPERROR_400_PARAM; | ||
415 | ++read_ptr; | 509 | ++read_ptr; |
416 | } | 510 | } |
417 | 511 | ||
418 | #ifdef WANT_IP_FROM_PROXY | 512 | #ifdef WANT_IP_FROM_PROXY |
419 | if( accesslist_isblessed( cookie->ip, OT_PERMISSION_MAY_PROXY ) ) { | 513 | if (accesslist_is_blessed(cookie->ip, OT_PERMISSION_MAY_PROXY)) { |
420 | ot_ip6 proxied_ip; | 514 | ot_ip6 proxied_ip; |
421 | char *fwd = http_header( ws->request, ws->header_size, "x-forwarded-for" ); | 515 | char *fwd = http_header(ws->request, ws->header_size, "x-forwarded-for"); |
422 | if( fwd && scan_ip6( fwd, proxied_ip ) ) | 516 | if (fwd && scan_ip6(fwd, proxied_ip)) { |
423 | OT_SETIP( &ws->peer, proxied_ip ); | 517 | OT_SETIP(ws->peer, proxied_ip); |
424 | else | 518 | } else |
425 | OT_SETIP( &ws->peer, cookie->ip ); | 519 | OT_SETIP(ws->peer, cookie->ip); |
426 | } else | 520 | } else |
427 | #endif | 521 | #endif |
428 | OT_SETIP( &ws->peer, cookie->ip ); | 522 | OT_SETIP(ws->peer, cookie->ip); |
429 | 523 | ||
430 | ws->peer_id = NULL; | 524 | ws->peer_id = NULL; |
431 | ws->hash = NULL; | 525 | ws->hash = NULL; |
432 | 526 | ||
433 | OT_SETPORT( &ws->peer, &port ); | 527 | OT_SETPORT(ws->peer, &port); |
434 | OT_PEERFLAG( &ws->peer ) = 0; | 528 | OT_PEERFLAG(ws->peer) = 0; |
435 | numwant = 50; | 529 | numwant = 50; |
436 | scanon = 1; | 530 | scanon = 1; |
437 | 531 | ||
438 | while( scanon ) { | 532 | while (scanon) { |
439 | switch( scan_find_keywords(keywords_announce, &read_ptr, SCAN_SEARCHPATH_PARAM ) ) { | 533 | switch (scan_find_keywords(keywords_announce, &read_ptr, SCAN_SEARCHPATH_PARAM)) { |
440 | case -2: scanon = 0; break; /* TERMINATOR */ | 534 | case -2: |
441 | case -1: HTTPERROR_400_PARAM; /* PARSE ERROR */ | 535 | scanon = 0; |
442 | case -3: scan_urlencoded_skipvalue( &read_ptr ); break; | 536 | break; /* TERMINATOR */ |
537 | case -1: | ||
538 | HTTPERROR_400_PARAM; /* PARSE ERROR */ | ||
539 | case -3: | ||
540 | scan_urlencoded_skipvalue(&read_ptr); | ||
541 | break; | ||
443 | case 1: /* matched "port" */ | 542 | case 1: /* matched "port" */ |
444 | len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ); | 543 | len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE); |
445 | if( ( len <= 0 ) || scan_fixed_int( write_ptr, len, &tmp ) || ( tmp > 0xffff ) ) HTTPERROR_400_PARAM; | 544 | if ((len <= 0) || scan_fixed_int(write_ptr, len, &tmp) || (tmp > 0xffff)) |
446 | port = htons( tmp ); OT_SETPORT( &ws->peer, &port ); | 545 | HTTPERROR_400_PARAM; |
546 | port = htons(tmp); | ||
547 | OT_SETPORT(&ws->peer, &port); | ||
447 | break; | 548 | break; |
448 | case 2: /* matched "left" */ | 549 | case 2: /* matched "left" */ |
449 | if( ( len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ) ) <= 0 ) HTTPERROR_400_PARAM; | 550 | if ((len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE)) <= 0) |
450 | if( scan_fixed_int( write_ptr, len, &tmp ) ) tmp = 0; | 551 | HTTPERROR_400_PARAM; |
451 | if( !tmp ) OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_SEEDING; | 552 | if (scan_fixed_int(write_ptr, len, &tmp)) |
553 | tmp = 0; | ||
554 | if (!tmp) | ||
555 | OT_PEERFLAG(&ws->peer) |= PEER_FLAG_SEEDING; | ||
452 | break; | 556 | break; |
453 | case 3: /* matched "event" */ | 557 | case 3: /* matched "event" */ |
454 | switch( scan_find_keywords( keywords_announce_event, &read_ptr, SCAN_SEARCHPATH_VALUE ) ) { | 558 | switch (scan_find_keywords(keywords_announce_event, &read_ptr, SCAN_SEARCHPATH_VALUE)) { |
455 | case -1: HTTPERROR_400_PARAM; | 559 | case -1: |
456 | case 1: /* matched "completed" */ | 560 | HTTPERROR_400_PARAM; |
457 | OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_COMPLETED; | 561 | case 1: /* matched "completed" */ |
458 | break; | 562 | OT_PEERFLAG(&ws->peer) |= PEER_FLAG_COMPLETED; |
459 | case 2: /* matched "stopped" */ | 563 | break; |
460 | OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_STOPPED; | 564 | case 2: /* matched "stopped" */ |
461 | break; | 565 | OT_PEERFLAG(&ws->peer) |= PEER_FLAG_STOPPED; |
462 | default: | 566 | break; |
463 | break; | 567 | default: |
568 | break; | ||
464 | } | 569 | } |
465 | break; | 570 | break; |
466 | case 4: /* matched "numwant" */ | 571 | case 4: /* matched "numwant" */ |
467 | len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ); | 572 | len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE); |
468 | if( ( len <= 0 ) || scan_fixed_int( write_ptr, len, &numwant ) ) HTTPERROR_400_PARAM; | 573 | if ((len <= 0) || scan_fixed_int(write_ptr, len, &numwant)) |
469 | if( numwant < 0 ) numwant = 50; | 574 | HTTPERROR_400_PARAM; |
470 | if( numwant > 200 ) numwant = 200; | 575 | if (numwant < 0) |
576 | numwant = 50; | ||
577 | if (numwant > 200) | ||
578 | numwant = 200; | ||
471 | break; | 579 | break; |
472 | case 5: /* matched "compact" */ | 580 | case 5: /* matched "compact" */ |
473 | len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ); | 581 | len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE); |
474 | if( ( len <= 0 ) || scan_fixed_int( write_ptr, len, &tmp ) ) HTTPERROR_400_PARAM; | 582 | if ((len <= 0) || scan_fixed_int(write_ptr, len, &tmp)) |
475 | if( !tmp ) HTTPERROR_400_COMPACT; | 583 | HTTPERROR_400_PARAM; |
584 | if (!tmp) | ||
585 | HTTPERROR_400_COMPACT; | ||
476 | break; | 586 | break; |
477 | case 6: /* matched "info_hash" */ | 587 | case 6: /* matched "info_hash" */ |
478 | if( ws->hash ) HTTPERROR_400_DOUBLEHASH; | 588 | if (ws->hash) |
589 | HTTPERROR_400_DOUBLEHASH; | ||
479 | /* ignore this, when we have less than 20 bytes */ | 590 | /* ignore this, when we have less than 20 bytes */ |
480 | if( scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ) != 20 ) HTTPERROR_400_PARAM; | 591 | if (scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE) != 20) |
481 | ws->hash = (ot_hash*)write_ptr; | 592 | HTTPERROR_400_PARAM; |
593 | ws->hash = (ot_hash *)write_ptr; | ||
482 | break; | 594 | break; |
483 | #ifdef WANT_IP_FROM_QUERY_STRING | 595 | #ifdef WANT_IP_FROM_QUERY_STRING |
484 | case 7: /* matched "ip" */ | 596 | case 7: /* matched "ip" */ |
485 | { | 597 | { |
486 | char *tmp_buf1 = ws->reply, *tmp_buf2 = ws->reply+16; | 598 | char *tmp_buf1 = ws->reply, *tmp_buf2 = ws->reply + 16; |
487 | len = scan_urlencoded_query( &read_ptr, tmp_buf2, SCAN_SEARCHPATH_VALUE ); | 599 | len = scan_urlencoded_query(&read_ptr, tmp_buf2, SCAN_SEARCHPATH_VALUE); |
488 | tmp_buf2[len] = 0; | 600 | tmp_buf2[len] = 0; |
489 | if( ( len <= 0 ) || !scan_ip6( tmp_buf2, tmp_buf1 ) ) HTTPERROR_400_PARAM; | 601 | if ((len <= 0) || !scan_ip6(tmp_buf2, tmp_buf1)) |
490 | OT_SETIP( &ws->peer, tmp_buf1 ); | 602 | HTTPERROR_400_PARAM; |
491 | } | 603 | OT_SETIP(&ws->peer, tmp_buf1); |
492 | break; | 604 | } break; |
493 | #endif | 605 | #endif |
494 | #ifdef WANT_FULLLOG_NETWORKS | 606 | #ifdef WANT_FULLLOG_NETWORKS |
495 | case 8: /* matched "lognet" */ | 607 | case 8: /* matched "lognet" */ |
496 | { | 608 | { |
497 | //if( accesslist_isblessed( cookie->ip, OT_PERMISSION_MAY_STAT ) ) { | 609 | // if( accesslist_is_blessed( cookie->ip, OT_PERMISSION_MAY_STAT ) ) { |
498 | char *tmp_buf = ws->reply; | 610 | char *tmp_buf = ws->reply; |
499 | ot_net net; | 611 | ot_net net; |
500 | signed short parsed, bits; | 612 | signed short parsed, bits; |
501 | 613 | ||
502 | len = scan_urlencoded_query( &read_ptr, tmp_buf, SCAN_SEARCHPATH_VALUE ); | 614 | len = scan_urlencoded_query(&read_ptr, tmp_buf, SCAN_SEARCHPATH_VALUE); |
503 | tmp_buf[len] = 0; | 615 | tmp_buf[len] = 0; |
504 | if( len <= 0 ) HTTPERROR_400_PARAM; | 616 | if (len <= 0) |
505 | if( *tmp_buf == '-' ) { | 617 | HTTPERROR_400_PARAM; |
506 | loglist_reset( ); | 618 | if (*tmp_buf == '-') { |
507 | return ws->reply_size = sprintf( ws->reply, "Successfully removed.\n" ); | 619 | loglist_reset(); |
508 | } | 620 | return ws->reply_size = sprintf(ws->reply, "Successfully removed.\n"); |
509 | parsed = scan_ip6( tmp_buf, net.address ); | ||
510 | if( !parsed ) HTTPERROR_400_PARAM; | ||
511 | if( tmp_buf[parsed++] != '/' ) | ||
512 | bits = 128; | ||
513 | else { | ||
514 | parsed = scan_short( tmp_buf + parsed, &bits ); | ||
515 | if( !parsed ) HTTPERROR_400_PARAM; | ||
516 | if( ip6_isv4mapped( net.address ) ) | ||
517 | bits += 96; | ||
518 | } | ||
519 | net.bits = bits; | ||
520 | loglist_add_network( &net ); | ||
521 | return ws->reply_size = sprintf( ws->reply, "Successfully added.\n" ); | ||
522 | //} | ||
523 | } | 621 | } |
524 | break; | 622 | parsed = scan_ip6(tmp_buf, net.address); |
623 | if (!parsed) | ||
624 | HTTPERROR_400_PARAM; | ||
625 | if (tmp_buf[parsed++] != '/') | ||
626 | bits = 128; | ||
627 | else { | ||
628 | parsed = scan_short(tmp_buf + parsed, &bits); | ||
629 | if (!parsed) | ||
630 | HTTPERROR_400_PARAM; | ||
631 | if (ip6_isv4mapped(net.address)) | ||
632 | bits += 96; | ||
633 | } | ||
634 | net.bits = bits; | ||
635 | loglist_add_network(&net); | ||
636 | return ws->reply_size = sprintf(ws->reply, "Successfully added.\n"); | ||
637 | //} | ||
638 | } break; | ||
525 | #endif | 639 | #endif |
526 | case 9: /* matched "peer_id" */ | 640 | case 9: /* matched "peer_id" */ |
527 | /* ignore this, when we have less than 20 bytes */ | 641 | /* ignore this, when we have less than 20 bytes */ |
528 | if( scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ) != 20 ) HTTPERROR_400_PARAM; | 642 | if (scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE) != 20) |
529 | ws->peer_id = write_ptr; | 643 | HTTPERROR_400_PARAM; |
530 | break; | 644 | ws->peer_id = write_ptr; |
645 | break; | ||
531 | } | 646 | } |
532 | } | 647 | } |
533 | 648 | ||
@@ -540,100 +655,107 @@ static ssize_t http_handle_announce( const int64 sock, struct ot_workstruct *ws, | |||
540 | */ | 655 | */ |
541 | 656 | ||
542 | /* Scanned whole query string */ | 657 | /* Scanned whole query string */ |
543 | if( !ws->hash ) | 658 | if (!ws->hash) |
544 | return ws->reply_size = sprintf( ws->reply, "d14:failure reason80:Your client forgot to send your torrent's info_hash. Please upgrade your client.e" ); | 659 | return ws->reply_size = sprintf(ws->reply, "d14:failure reason80:Your client forgot to send your torrent's info_hash. Please upgrade your client.e"); |
545 | 660 | ||
546 | if( OT_PEERFLAG( &ws->peer ) & PEER_FLAG_STOPPED ) | 661 | if (OT_PEERFLAG(&ws->peer) & PEER_FLAG_STOPPED) |
547 | ws->reply_size = remove_peer_from_torrent( FLAG_TCP, ws ); | 662 | ws->reply_size = remove_peer_from_torrent(FLAG_TCP, ws); |
548 | else | 663 | else |
549 | ws->reply_size = add_peer_to_torrent_and_return_peers( FLAG_TCP, ws, numwant ); | 664 | ws->reply_size = add_peer_to_torrent_and_return_peers(FLAG_TCP, ws, numwant); |
550 | 665 | ||
551 | stats_issue_event( EVENT_ANNOUNCE, FLAG_TCP, ws->reply_size); | 666 | stats_issue_event(EVENT_ANNOUNCE, FLAG_TCP, ws->reply_size); |
552 | return ws->reply_size; | 667 | return ws->reply_size; |
553 | } | 668 | } |
554 | 669 | ||
555 | ssize_t http_handle_request( const int64 sock, struct ot_workstruct *ws ) { | 670 | ssize_t http_handle_request(const int64 sock, struct ot_workstruct *ws) { |
556 | ssize_t reply_off, len; | 671 | ssize_t reply_off, len; |
557 | char *read_ptr = ws->request, *write_ptr; | 672 | char *read_ptr = ws->request, *write_ptr; |
558 | 673 | ||
559 | #ifdef WANT_FULLLOG_NETWORKS | 674 | #ifdef WANT_FULLLOG_NETWORKS |
560 | struct http_data *cookie = io_getcookie( sock ); | 675 | struct http_data *cookie = io_getcookie(sock); |
561 | if( loglist_check_address( cookie->ip ) ) { | 676 | if (loglist_check_address(cookie->ip)) { |
562 | ot_log *log = malloc( sizeof( ot_log ) ); | 677 | ot_log *log = malloc(sizeof(ot_log)); |
563 | if( log ) { | 678 | if (log) { |
564 | log->size = ws->request_size; | 679 | log->size = ws->request_size; |
565 | log->data = malloc( ws->request_size ); | 680 | log->data = malloc(ws->request_size); |
566 | log->next = 0; | 681 | log->next = 0; |
567 | log->time = g_now_seconds; | 682 | log->time = g_now_seconds; |
568 | memcpy( log->ip, cookie->ip, sizeof(ot_ip6)); | 683 | memcpy(log->ip, cookie->ip, sizeof(ot_ip6)); |
569 | if( log->data ) { | 684 | if (log->data) { |
570 | memcpy( log->data, ws->request, ws->request_size ); | 685 | memcpy(log->data, ws->request, ws->request_size); |
571 | if( !g_logchain_first ) | 686 | if (!g_logchain_first) |
572 | g_logchain_first = g_logchain_last = log; | 687 | g_logchain_first = g_logchain_last = log; |
573 | else { | 688 | else { |
574 | g_logchain_last->next = log; | 689 | g_logchain_last->next = log; |
575 | g_logchain_last = log; | 690 | g_logchain_last = log; |
576 | } | 691 | } |
577 | } else | 692 | } else |
578 | free( log ); | 693 | free(log); |
579 | } | 694 | } |
580 | } | 695 | } |
581 | #endif | 696 | #endif |
582 | 697 | ||
583 | #ifdef _DEBUG_HTTPERROR | 698 | #ifdef _DEBUG_HTTPERROR |
584 | reply_off = ws->request_size; | 699 | reply_off = ws->request_size; |
585 | if( ws->request_size >= G_DEBUGBUF_SIZE ) | 700 | if (ws->request_size >= G_DEBUGBUF_SIZE) |
586 | reply_off = G_DEBUGBUF_SIZE - 1; | 701 | reply_off = G_DEBUGBUF_SIZE - 1; |
587 | memcpy( ws->debugbuf, ws->request, reply_off ); | 702 | memcpy(ws->debugbuf, ws->request, reply_off); |
588 | ws->debugbuf[ reply_off ] = 0; | 703 | ws->debugbuf[reply_off] = 0; |
589 | #endif | 704 | #endif |
590 | 705 | ||
591 | /* Tell subroutines where to put reply data */ | 706 | /* Tell subroutines where to put reply data */ |
592 | ws->reply = ws->outbuf + SUCCESS_HTTP_HEADER_LENGTH; | 707 | ws->reply = ws->outbuf + SUCCESS_HTTP_HEADER_LENGTH; |
593 | 708 | ||
594 | /* This one implicitely tests strlen < 5, too -- remember, it is \n terminated */ | 709 | /* This one implicitely tests strlen < 5, too -- remember, it is \n terminated */ |
595 | if( memcmp( read_ptr, "GET /", 5) ) HTTPERROR_400; | 710 | if (memcmp(read_ptr, "GET /", 5)) |
711 | HTTPERROR_400; | ||
596 | 712 | ||
597 | /* Skip leading '/' */ | 713 | /* Skip leading '/' */ |
598 | for( read_ptr+=4; *read_ptr == '/'; ++read_ptr); | 714 | for (read_ptr += 4; *read_ptr == '/'; ++read_ptr) |
715 | ; | ||
599 | 716 | ||
600 | /* Try to parse the request. | 717 | /* Try to parse the request. |
601 | In reality we abandoned requiring the url to be correct. This now | 718 | In reality we abandoned requiring the url to be correct. This now |
602 | only decodes url encoded characters, we check for announces and | 719 | only decodes url encoded characters, we check for announces and |
603 | scrapes by looking for "a*" or "sc" */ | 720 | scrapes by looking for "a*" or "sc" */ |
604 | len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_PATH ); | 721 | len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_PATH); |
605 | 722 | ||
606 | /* If parsing returned an error, leave with not found */ | 723 | /* If parsing returned an error, leave with not found */ |
607 | if( g_redirecturl && ( len == -2 ) ) HTTPERROR_302; | 724 | if (g_redirecturl && (len == -2)) |
608 | if( len <= 0 ) HTTPERROR_404; | 725 | HTTPERROR_302; |
726 | if (len <= 0) | ||
727 | HTTPERROR_404; | ||
609 | 728 | ||
610 | /* This is the hardcore match for announce*/ | 729 | /* This is the hardcore match for announce*/ |
611 | if( ( *write_ptr == 'a' ) || ( *write_ptr == '?' ) ) | 730 | if ((*write_ptr == 'a') || (*write_ptr == '?')) |
612 | http_handle_announce( sock, ws, read_ptr ); | 731 | http_handle_announce(sock, ws, read_ptr); |
613 | #ifdef WANT_FULLSCRAPE | 732 | #ifdef WANT_FULLSCRAPE |
614 | else if( !memcmp( write_ptr, "scrape HTTP/", 12 ) ) | 733 | else if (!memcmp(write_ptr, "scrape HTTP/", 12)) |
615 | http_handle_fullscrape( sock, ws ); | 734 | http_handle_fullscrape(sock, ws); |
616 | #endif | 735 | #endif |
617 | /* This is the hardcore match for scrape */ | 736 | /* This is the hardcore match for scrape */ |
618 | else if( !memcmp( write_ptr, "sc", 2 ) ) | 737 | else if (!memcmp(write_ptr, "sc", 2)) |
619 | http_handle_scrape( sock, ws, read_ptr ); | 738 | http_handle_scrape(sock, ws, read_ptr); |
620 | /* All the rest is matched the standard way */ | 739 | /* All the rest is matched the standard way */ |
621 | else if( len == g_stats_path_len && !memcmp( write_ptr, g_stats_path, len ) ) | 740 | else if (len == g_stats_path_len && !memcmp(write_ptr, g_stats_path, len)) |
622 | http_handle_stats( sock, ws, read_ptr ); | 741 | http_handle_stats(sock, ws, read_ptr); |
623 | else | 742 | else |
624 | HTTPERROR_404; | 743 | HTTPERROR_404; |
625 | 744 | ||
626 | /* Find out if the client wants to keep this connection alive */ | 745 | /* Find out if the client wants to keep this connection alive */ |
627 | ws->keep_alive = 0; | 746 | ws->keep_alive = 0; |
628 | #ifdef WANT_KEEPALIVE | 747 | #ifdef WANT_KEEPALIVE |
629 | read_ptr=http_header( ws->request, ws->header_size, "connection"); | 748 | read_ptr = http_header(ws->request, ws->header_size, "connection"); |
630 | if( read_ptr && ( *read_ptr == 'K' || *read_ptr == 'k' ) ) ws->keep_alive = 1; | 749 | if (read_ptr && (*read_ptr == 'K' || *read_ptr == 'k')) |
750 | ws->keep_alive = 1; | ||
631 | #endif | 751 | #endif |
632 | 752 | ||
633 | /* If routines handled sending themselves, just return */ | 753 | /* If routines handled sending themselves, just return */ |
634 | if( ws->reply_size == -2 ) return 0; | 754 | if (ws->reply_size == -2) |
755 | return 0; | ||
635 | /* If routine failed, let http error take over */ | 756 | /* If routine failed, let http error take over */ |
636 | if( ws->reply_size <= 0 ) HTTPERROR_500; | 757 | if (ws->reply_size <= 0) |
758 | HTTPERROR_500; | ||
637 | 759 | ||
638 | /* This one is rather ugly, so I take you step by step through it. | 760 | /* This one is rather ugly, so I take you step by step through it. |
639 | 761 | ||
@@ -642,18 +764,16 @@ ssize_t http_handle_request( const int64 sock, struct ot_workstruct *ws ) { | |||
642 | plus dynamic space needed to expand our Content-Length value. We reserve SUCCESS_HTTP_SIZE_OFF for its expansion and calculate | 764 | plus dynamic space needed to expand our Content-Length value. We reserve SUCCESS_HTTP_SIZE_OFF for its expansion and calculate |
643 | the space NOT needed to expand in reply_off | 765 | the space NOT needed to expand in reply_off |
644 | */ | 766 | */ |
645 | reply_off = SUCCESS_HTTP_SIZE_OFF - snprintf( ws->outbuf, 0, "%zd", ws->reply_size ); | 767 | reply_off = SUCCESS_HTTP_SIZE_OFF - snprintf(ws->outbuf, 0, "%zd", ws->reply_size); |
646 | ws->reply = ws->outbuf + reply_off; | 768 | ws->reply = ws->outbuf + reply_off; |
647 | 769 | ||
648 | /* 2. Now we sprintf our header so that sprintf writes its terminating '\0' exactly one byte before content starts. Complete | 770 | /* 2. Now we sprintf our header so that sprintf writes its terminating '\0' exactly one byte before content starts. Complete |
649 | packet size is increased by size of header plus one byte '\n', we will copy over '\0' in next step */ | 771 | packet size is increased by size of header plus one byte '\n', we will copy over '\0' in next step */ |
650 | ws->reply_size += 1 + sprintf( ws->reply, "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r", ws->reply_size ); | 772 | ws->reply_size += 1 + sprintf(ws->reply, "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r", ws->reply_size); |
651 | 773 | ||
652 | /* 3. Finally we join both blocks neatly */ | 774 | /* 3. Finally we join both blocks neatly */ |
653 | ws->outbuf[ SUCCESS_HTTP_HEADER_LENGTH - 1 ] = '\n'; | 775 | ws->outbuf[SUCCESS_HTTP_HEADER_LENGTH - 1] = '\n'; |
654 | 776 | ||
655 | http_senddata( sock, ws ); | 777 | http_senddata(sock, ws); |
656 | return ws->reply_size; | 778 | return ws->reply_size; |
657 | } | 779 | } |
658 | |||
659 | const char *g_version_http_c = "$Source$: $Revision$\n"; | ||
@@ -7,9 +7,12 @@ | |||
7 | #define OT_HTTP_H__ | 7 | #define OT_HTTP_H__ |
8 | 8 | ||
9 | typedef enum { | 9 | typedef enum { |
10 | STRUCT_HTTP_FLAG_WAITINGFORTASK = 1, | 10 | STRUCT_HTTP_FLAG_WAITINGFORTASK = 1, |
11 | STRUCT_HTTP_FLAG_GZIP = 2, | 11 | STRUCT_HTTP_FLAG_GZIP = 2, |
12 | STRUCT_HTTP_FLAG_BZIP2 = 4 | 12 | STRUCT_HTTP_FLAG_BZIP2 = 4, |
13 | STRUCT_HTTP_FLAG_ZSTD = 8, | ||
14 | STRUCT_HTTP_FLAG_CHUNKED = 16, | ||
15 | STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER = 32 | ||
13 | } STRUCT_HTTP_FLAG; | 16 | } STRUCT_HTTP_FLAG; |
14 | 17 | ||
15 | struct http_data { | 18 | struct http_data { |
@@ -20,9 +23,9 @@ struct http_data { | |||
20 | STRUCT_HTTP_FLAG flag; | 23 | STRUCT_HTTP_FLAG flag; |
21 | }; | 24 | }; |
22 | 25 | ||
23 | ssize_t http_handle_request( const int64 s, struct ot_workstruct *ws ); | 26 | ssize_t http_handle_request(const int64 s, struct ot_workstruct *ws); |
24 | ssize_t http_sendiovecdata( const int64 s, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector ); | 27 | ssize_t http_sendiovecdata(const int64 s, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial); |
25 | ssize_t http_issue_error( const int64 s, struct ot_workstruct *ws, int code ); | 28 | ssize_t http_issue_error(const int64 s, struct ot_workstruct *ws, int code); |
26 | 29 | ||
27 | extern char *g_stats_path; | 30 | extern char *g_stats_path; |
28 | extern ssize_t g_stats_path_len; | 31 | extern ssize_t g_stats_path_len; |
@@ -4,73 +4,89 @@ | |||
4 | $id$ */ | 4 | $id$ */ |
5 | 5 | ||
6 | /* System */ | 6 | /* System */ |
7 | #include <sys/types.h> | ||
8 | #include <stdlib.h> | 7 | #include <stdlib.h> |
9 | #include <unistd.h> | 8 | #include <sys/types.h> |
10 | #include <sys/uio.h> | 9 | #include <sys/uio.h> |
10 | #include <unistd.h> | ||
11 | 11 | ||
12 | /* Libowfat */ | 12 | /* Libowfat */ |
13 | 13 | ||
14 | /* Opentracker */ | 14 | /* Opentracker */ |
15 | #include "ot_iovec.h" | 15 | #include "ot_iovec.h" |
16 | 16 | ||
17 | void *iovec_increase( int *iovec_entries, struct iovec **iovector, size_t new_alloc ) { | 17 | void *iovec_increase(int *iovec_entries, struct iovec **iovector, size_t new_alloc) { |
18 | void *new_data; | 18 | void *new_data; |
19 | int new_entries = 1 + *iovec_entries; | 19 | int new_entries = 1 + *iovec_entries; |
20 | struct iovec *new_vec = realloc( *iovector, new_entries * sizeof( struct iovec ) ); | 20 | struct iovec *new_vec = realloc(*iovector, new_entries * sizeof(struct iovec)); |
21 | 21 | ||
22 | if( !new_vec ) | 22 | if (!new_vec) |
23 | return NULL; | 23 | return NULL; |
24 | 24 | ||
25 | /* Only allocate after we have a place to store the pointer */ | 25 | /* Only allocate after we have a place to store the pointer */ |
26 | new_data = malloc( new_alloc ); | 26 | new_data = malloc(new_alloc); |
27 | if( !new_data ) | 27 | if (!new_data) |
28 | return NULL; | 28 | return NULL; |
29 | 29 | ||
30 | new_vec[new_entries - 1].iov_base = new_data; | 30 | new_vec[new_entries - 1].iov_base = new_data; |
31 | new_vec[new_entries - 1].iov_len = new_alloc; | 31 | new_vec[new_entries - 1].iov_len = new_alloc; |
32 | 32 | ||
33 | *iovector = new_vec; | 33 | *iovector = new_vec; |
34 | ++*iovec_entries; | 34 | ++*iovec_entries; |
35 | return new_data; | 35 | return new_data; |
36 | } | 36 | } |
37 | 37 | ||
38 | void iovec_free( int *iovec_entries, struct iovec **iovector ) { | 38 | void *iovec_append(int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector) { |
39 | int new_entries = *iovec_entries + 1; | ||
40 | struct iovec *new_vec = realloc(*iovector, new_entries * sizeof(struct iovec)); | ||
41 | if (!new_vec) | ||
42 | return NULL; | ||
43 | |||
44 | /* Take over data from appended iovec */ | ||
45 | new_vec[*iovec_entries].iov_base = append_iovector->iov_base; | ||
46 | new_vec[*iovec_entries].iov_len = append_iovector->iov_len; | ||
47 | |||
48 | append_iovector->iov_base = NULL; | ||
49 | append_iovector->iov_len = 0; | ||
50 | |||
51 | *iovector = new_vec; | ||
52 | *iovec_entries = new_entries; | ||
53 | |||
54 | return new_vec; | ||
55 | } | ||
56 | |||
57 | void iovec_free(int *iovec_entries, struct iovec **iovector) { | ||
39 | int i; | 58 | int i; |
40 | for( i=0; i<*iovec_entries; ++i ) | 59 | for (i = 0; i < *iovec_entries; ++i) |
41 | free( ((*iovector)[i]).iov_base ); | 60 | free(((*iovector)[i]).iov_base); |
42 | *iovector = NULL; | 61 | *iovector = NULL; |
43 | *iovec_entries = 0; | 62 | *iovec_entries = 0; |
44 | } | 63 | } |
45 | 64 | ||
46 | void iovec_fixlast( int *iovec_entries, struct iovec **iovector, void *last_ptr ) { | 65 | void iovec_fixlast(int *iovec_entries, struct iovec **iovector, void *last_ptr) { |
47 | if( *iovec_entries ) { | 66 | if (*iovec_entries) { |
48 | char * base = (char*)((*iovector)[ *iovec_entries - 1 ]).iov_base; | 67 | char *base = (char *)((*iovector)[*iovec_entries - 1]).iov_base; |
49 | size_t new_alloc = ((char*)last_ptr) - base; | 68 | size_t new_alloc = ((char *)last_ptr) - base; |
50 | 69 | ||
51 | ((*iovector)[*iovec_entries - 1 ]).iov_base = realloc( base, new_alloc ); | 70 | ((*iovector)[*iovec_entries - 1]).iov_base = realloc(base, new_alloc); |
52 | ((*iovector)[*iovec_entries - 1 ]).iov_len = new_alloc; | 71 | ((*iovector)[*iovec_entries - 1]).iov_len = new_alloc; |
53 | } | 72 | } |
54 | } | 73 | } |
55 | 74 | ||
56 | void *iovec_fix_increase_or_free( int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc ) { | 75 | void *iovec_fix_increase_or_free(int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc) { |
57 | void *new_data; | 76 | void *new_data; |
58 | 77 | ||
59 | iovec_fixlast( iovec_entries, iovector, last_ptr ); | 78 | iovec_fixlast(iovec_entries, iovector, last_ptr); |
60 | 79 | ||
61 | if( !( new_data = iovec_increase( iovec_entries, iovector, new_alloc ) ) ) | 80 | if (!(new_data = iovec_increase(iovec_entries, iovector, new_alloc))) |
62 | iovec_free( iovec_entries, iovector ); | 81 | iovec_free(iovec_entries, iovector); |
63 | 82 | ||
64 | return new_data; | 83 | return new_data; |
65 | } | 84 | } |
66 | 85 | ||
67 | 86 | size_t iovec_length(const int *iovec_entries, const struct iovec **iovector) { | |
68 | size_t iovec_length( const int *iovec_entries, const struct iovec **iovector ) { | ||
69 | size_t length = 0; | 87 | size_t length = 0; |
70 | int i; | 88 | int i; |
71 | for( i=0; i<*iovec_entries; ++i ) | 89 | for (i = 0; i < *iovec_entries; ++i) |
72 | length += ((*iovector)[i]).iov_len; | 90 | length += ((*iovector)[i]).iov_len; |
73 | return length; | 91 | return length; |
74 | } | 92 | } |
75 | |||
76 | const char *g_version_iovec_c = "$Source$: $Revision$\n"; | ||
@@ -8,12 +8,13 @@ | |||
8 | 8 | ||
9 | #include <sys/uio.h> | 9 | #include <sys/uio.h> |
10 | 10 | ||
11 | void *iovec_increase( int *iovec_entries, struct iovec **iovector, size_t new_alloc ); | 11 | void *iovec_increase(int *iovec_entries, struct iovec **iovector, size_t new_alloc); |
12 | void iovec_fixlast( int *iovec_entries, struct iovec **iovector, void *last_ptr ); | 12 | void *iovec_append(int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector); |
13 | void iovec_free( int *iovec_entries, struct iovec **iovector ); | 13 | void iovec_fixlast(int *iovec_entries, struct iovec **iovector, void *last_ptr); |
14 | void iovec_free(int *iovec_entries, struct iovec **iovector); | ||
14 | 15 | ||
15 | size_t iovec_length( const int *iovec_entries, const struct iovec **iovector ); | 16 | size_t iovec_length(const int *iovec_entries, const struct iovec **iovector); |
16 | 17 | ||
17 | void *iovec_fix_increase_or_free( int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc ); | 18 | void *iovec_fix_increase_or_free(int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc); |
18 | 19 | ||
19 | #endif | 20 | #endif |
diff --git a/ot_livesync.c b/ot_livesync.c index cded0f7..269b8d8 100644 --- a/ot_livesync.c +++ b/ot_livesync.c | |||
@@ -4,204 +4,228 @@ | |||
4 | $id$ */ | 4 | $id$ */ |
5 | 5 | ||
6 | /* System */ | 6 | /* System */ |
7 | #include <pthread.h> | ||
8 | #include <stdlib.h> | ||
9 | #include <string.h> | ||
7 | #include <sys/types.h> | 10 | #include <sys/types.h> |
8 | #include <sys/uio.h> | 11 | #include <sys/uio.h> |
9 | #include <string.h> | ||
10 | #include <pthread.h> | ||
11 | #include <unistd.h> | 12 | #include <unistd.h> |
12 | #include <stdlib.h> | ||
13 | 13 | ||
14 | /* Libowfat */ | 14 | /* Libowfat */ |
15 | #include "socket.h" | ||
16 | #include "ndelay.h" | ||
17 | #include "byte.h" | 15 | #include "byte.h" |
18 | #include "ip6.h" | 16 | #include "ip6.h" |
17 | #include "ndelay.h" | ||
18 | #include "socket.h" | ||
19 | 19 | ||
20 | /* Opentracker */ | 20 | /* Opentracker */ |
21 | #include "trackerlogic.h" | ||
22 | #include "ot_livesync.h" | ||
23 | #include "ot_accesslist.h" | 21 | #include "ot_accesslist.h" |
24 | #include "ot_stats.h" | 22 | #include "ot_livesync.h" |
25 | #include "ot_mutex.h" | 23 | #include "ot_mutex.h" |
24 | #include "ot_stats.h" | ||
25 | #include "trackerlogic.h" | ||
26 | 26 | ||
27 | #ifdef WANT_SYNC_LIVE | 27 | #ifdef WANT_SYNC_LIVE |
28 | 28 | ||
29 | char groupip_1[4] = { 224,0,23,5 }; | 29 | char groupip_1[4] = {224, 0, 23, 5}; |
30 | 30 | ||
31 | #define LIVESYNC_INCOMING_BUFFSIZE (256*256) | 31 | #define LIVESYNC_INCOMING_BUFFSIZE (256 * 256) |
32 | 32 | ||
33 | #define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480 | 33 | #define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480 |
34 | #define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer)+sizeof(ot_hash)) | 34 | #define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer) + sizeof(ot_hash)) |
35 | 35 | ||
36 | #define LIVESYNC_MAXDELAY 15 /* seconds */ | 36 | #define LIVESYNC_MAXDELAY 15 /* seconds */ |
37 | 37 | ||
38 | enum { OT_SYNC_PEER }; | 38 | enum { OT_SYNC_PEER4, OT_SYNC_PEER6 }; |
39 | 39 | ||
40 | /* Forward declaration */ | 40 | /* Forward declaration */ |
41 | static void * livesync_worker( void * args ); | 41 | static void *livesync_worker(void *args); |
42 | 42 | ||
43 | /* For outgoing packets */ | 43 | /* For outgoing packets */ |
44 | static int64 g_socket_in = -1; | 44 | static int64 g_socket_in = -1; |
45 | 45 | ||
46 | /* For incoming packets */ | 46 | /* For incoming packets */ |
47 | static int64 g_socket_out = -1; | 47 | static int64 g_socket_out = -1; |
48 | 48 | ||
49 | static pthread_mutex_t g_outbuf_mutex = PTHREAD_MUTEX_INITIALIZER; | 49 | static pthread_mutex_t g_outbuf_mutex = PTHREAD_MUTEX_INITIALIZER; |
50 | char g_outbuf[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; | 50 | typedef struct { |
51 | static size_t g_outbuf_data; | 51 | uint8_t data[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; |
52 | static ot_time g_next_packet_time; | 52 | size_t fill; |
53 | ot_time next_packet_time; | ||
54 | } sync_buffer; | ||
53 | 55 | ||
54 | static pthread_t thread_id; | 56 | static sync_buffer g_v6_buf; |
55 | void livesync_init( ) { | 57 | static sync_buffer g_v4_buf; |
56 | 58 | ||
57 | if( g_socket_in == -1 ) | 59 | static pthread_t thread_id; |
58 | exerr( "No socket address for live sync specified." ); | 60 | void livesync_init() { |
61 | |||
62 | if (g_socket_in == -1) | ||
63 | exerr("No socket address for live sync specified."); | ||
59 | 64 | ||
60 | /* Prepare outgoing peers buffer */ | 65 | /* Prepare outgoing peers buffer */ |
61 | memcpy( g_outbuf, &g_tracker_id, sizeof( g_tracker_id ) ); | 66 | memcpy(g_v6_buf.data, &g_tracker_id, sizeof(g_tracker_id)); |
62 | uint32_pack_big( g_outbuf + sizeof( g_tracker_id ), OT_SYNC_PEER); | 67 | memcpy(g_v4_buf.data, &g_tracker_id, sizeof(g_tracker_id)); |
63 | g_outbuf_data = sizeof( g_tracker_id ) + sizeof( uint32_t ); | 68 | |
69 | uint32_pack_big((char *)g_v6_buf.data + sizeof(g_tracker_id), OT_SYNC_PEER6); | ||
70 | uint32_pack_big((char *)g_v4_buf.data + sizeof(g_tracker_id), OT_SYNC_PEER4); | ||
71 | |||
72 | g_v6_buf.fill = sizeof(g_tracker_id) + sizeof(uint32_t); | ||
73 | g_v4_buf.fill = sizeof(g_tracker_id) + sizeof(uint32_t); | ||
64 | 74 | ||
65 | g_next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; | 75 | g_v6_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; |
76 | g_v4_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; | ||
66 | 77 | ||
67 | pthread_create( &thread_id, NULL, livesync_worker, NULL ); | 78 | pthread_create(&thread_id, NULL, livesync_worker, NULL); |
68 | } | 79 | } |
69 | 80 | ||
70 | void livesync_deinit() { | 81 | void livesync_deinit() { |
71 | if( g_socket_in != -1 ) | 82 | if (g_socket_in != -1) |
72 | close( g_socket_in ); | 83 | close(g_socket_in); |
73 | if( g_socket_out != -1 ) | 84 | if (g_socket_out != -1) |
74 | close( g_socket_out ); | 85 | close(g_socket_out); |
75 | 86 | ||
76 | pthread_cancel( thread_id ); | 87 | pthread_cancel(thread_id); |
77 | } | 88 | } |
78 | 89 | ||
79 | void livesync_bind_mcast( ot_ip6 ip, uint16_t port) { | 90 | void livesync_bind_mcast(ot_ip6 ip, uint16_t port) { |
80 | char tmpip[4] = {0,0,0,0}; | 91 | char tmpip[4] = {0, 0, 0, 0}; |
81 | char *v4ip; | 92 | char *v4ip; |
82 | 93 | ||
83 | if( !ip6_isv4mapped(ip)) | 94 | if (!ip6_isv4mapped(ip)) |
84 | exerr("v6 mcast support not yet available."); | 95 | exerr("v6 mcast support not yet available."); |
85 | v4ip = ip+12; | 96 | v4ip = ip + 12; |
86 | 97 | ||
87 | if( g_socket_in != -1 ) | 98 | if (g_socket_in != -1) |
88 | exerr("Error: Livesync listen ip specified twice."); | 99 | exerr("Error: Livesync listen ip specified twice."); |
89 | 100 | ||
90 | if( ( g_socket_in = socket_udp4( )) < 0) | 101 | if ((g_socket_in = socket_udp4()) < 0) |
91 | exerr("Error: Cant create live sync incoming socket." ); | 102 | exerr("Error: Cant create live sync incoming socket."); |
92 | ndelay_off(g_socket_in); | 103 | ndelay_off(g_socket_in); |
93 | 104 | ||
94 | if( socket_bind4_reuse( g_socket_in, tmpip, port ) == -1 ) | 105 | if (socket_bind4_reuse(g_socket_in, tmpip, port) == -1) |
95 | exerr("Error: Cant bind live sync incoming socket." ); | 106 | exerr("Error: Cant bind live sync incoming socket."); |
96 | 107 | ||
97 | if( socket_mcjoin4( g_socket_in, groupip_1, v4ip ) ) | 108 | if (socket_mcjoin4(g_socket_in, groupip_1, v4ip)) |
98 | exerr("Error: Cant make live sync incoming socket join mcast group."); | 109 | exerr("Error: Cant make live sync incoming socket join mcast group."); |
99 | 110 | ||
100 | if( ( g_socket_out = socket_udp4()) < 0) | 111 | if ((g_socket_out = socket_udp4()) < 0) |
101 | exerr("Error: Cant create live sync outgoing socket." ); | 112 | exerr("Error: Cant create live sync outgoing socket."); |
102 | if( socket_bind4_reuse( g_socket_out, v4ip, port ) == -1 ) | 113 | if (socket_bind4_reuse(g_socket_out, v4ip, port) == -1) |
103 | exerr("Error: Cant bind live sync outgoing socket." ); | 114 | exerr("Error: Cant bind live sync outgoing socket."); |
104 | 115 | ||
105 | socket_mcttl4(g_socket_out, 1); | 116 | socket_mcttl4(g_socket_out, 1); |
106 | socket_mcloop4(g_socket_out, 0); | 117 | socket_mcloop4(g_socket_out, 0); |
107 | } | 118 | } |
108 | 119 | ||
109 | /* Caller MUST hold g_outbuf_mutex. Returns with g_outbuf_mutex unlocked */ | 120 | /* Caller MUST hold g_outbuf_mutex. Returns with g_outbuf_mutex unlocked */ |
110 | static void livesync_issue_peersync( ) { | 121 | static void livesync_issue_peersync(sync_buffer *buf) { |
111 | char mycopy[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; | 122 | char mycopy[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; |
112 | size_t data = g_outbuf_data; | 123 | size_t fill = buf->fill; |
113 | 124 | ||
114 | memcpy( mycopy, g_outbuf, data ); | 125 | memcpy(mycopy, buf->data, fill); |
115 | g_outbuf_data = sizeof( g_tracker_id ) + sizeof( uint32_t ); | 126 | buf->fill = sizeof(g_tracker_id) + sizeof(uint32_t); |
116 | g_next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; | 127 | buf->next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; |
117 | 128 | ||
118 | /* From now this thread has a local copy of the buffer and | 129 | /* From now this thread has a local copy of the buffer and |
119 | has modified the protected element */ | 130 | has modified the protected element */ |
120 | pthread_mutex_unlock(&g_outbuf_mutex); | 131 | pthread_mutex_unlock(&g_outbuf_mutex); |
121 | 132 | ||
122 | socket_send4(g_socket_out, mycopy, data, groupip_1, LIVESYNC_PORT); | 133 | socket_send4(g_socket_out, mycopy, fill, groupip_1, LIVESYNC_PORT); |
123 | } | 134 | } |
124 | 135 | ||
125 | static void livesync_handle_peersync( struct ot_workstruct *ws ) { | 136 | static void livesync_handle_peersync(struct ot_workstruct *ws, size_t peer_size) { |
126 | int off = sizeof( g_tracker_id ) + sizeof( uint32_t ); | 137 | size_t off = sizeof(g_tracker_id) + sizeof(uint32_t); |
127 | 138 | ||
128 | /* Now basic sanity checks have been done on the live sync packet | 139 | /* Now basic sanity checks have been done on the live sync packet |
129 | We might add more testing and logging. */ | 140 | We might add more testing and logging. */ |
130 | while( off + (ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ) <= ws->request_size ) { | 141 | while ((ssize_t)(off + sizeof(ot_hash) + peer_size) <= ws->request_size) { |
131 | memcpy( &ws->peer, ws->request + off + sizeof(ot_hash), sizeof( ot_peer ) ); | 142 | memcpy(&ws->peer, ws->request + off + sizeof(ot_hash), peer_size); |
132 | ws->hash = (ot_hash*)(ws->request + off); | 143 | ws->hash = (ot_hash *)(ws->request + off); |
133 | 144 | ||
134 | if( !g_opentracker_running ) return; | 145 | if (!g_opentracker_running) |
146 | return; | ||
135 | 147 | ||
136 | if( OT_PEERFLAG(&ws->peer) & PEER_FLAG_STOPPED ) | 148 | if (OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED) |
137 | remove_peer_from_torrent( FLAG_MCA, ws ); | 149 | remove_peer_from_torrent(FLAG_MCA, ws); |
138 | else | 150 | else |
139 | add_peer_to_torrent_and_return_peers( FLAG_MCA, ws, /* amount = */ 0 ); | 151 | add_peer_to_torrent_and_return_peers(FLAG_MCA, ws, /* amount = */ 0); |
140 | 152 | ||
141 | off += sizeof( ot_hash ) + sizeof( ot_peer ); | 153 | off += sizeof(ot_hash) + peer_size; |
142 | } | 154 | } |
143 | 155 | ||
144 | stats_issue_event(EVENT_SYNC, 0, | 156 | stats_issue_event(EVENT_SYNC, 0, (ws->request_size - sizeof(g_tracker_id) - sizeof(uint32_t)) / ((ssize_t)sizeof(ot_hash) + peer_size)); |
145 | (ws->request_size - sizeof( g_tracker_id ) - sizeof( uint32_t ) ) / | ||
146 | ((ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ))); | ||
147 | } | 157 | } |
148 | 158 | ||
149 | /* Tickle the live sync module from time to time, so no events get | 159 | /* Tickle the live sync module from time to time, so no events get |
150 | stuck when there's not enough traffic to fill udp packets fast | 160 | stuck when there's not enough traffic to fill udp packets fast |
151 | enough */ | 161 | enough */ |
152 | void livesync_ticker( ) { | 162 | void livesync_ticker() { |
153 | /* livesync_issue_peersync sets g_next_packet_time */ | 163 | /* livesync_issue_peersync sets g_next_packet_time */ |
154 | pthread_mutex_lock(&g_outbuf_mutex); | 164 | pthread_mutex_lock(&g_outbuf_mutex); |
155 | if( g_now_seconds > g_next_packet_time && | 165 | if (g_now_seconds > g_v6_buf.next_packet_time && g_v6_buf.fill > sizeof(g_tracker_id) + sizeof(uint32_t)) |
156 | g_outbuf_data > sizeof( g_tracker_id ) + sizeof( uint32_t ) ) | 166 | livesync_issue_peersync(&g_v6_buf); |
157 | livesync_issue_peersync(); | 167 | else |
168 | pthread_mutex_unlock(&g_outbuf_mutex); | ||
169 | |||
170 | pthread_mutex_lock(&g_outbuf_mutex); | ||
171 | if (g_now_seconds > g_v4_buf.next_packet_time && g_v4_buf.fill > sizeof(g_tracker_id) + sizeof(uint32_t)) | ||
172 | livesync_issue_peersync(&g_v4_buf); | ||
158 | else | 173 | else |
159 | pthread_mutex_unlock(&g_outbuf_mutex); | 174 | pthread_mutex_unlock(&g_outbuf_mutex); |
160 | } | 175 | } |
161 | 176 | ||
162 | /* Inform live sync about whats going on. */ | 177 | /* Inform live sync about whats going on. */ |
163 | void livesync_tell( struct ot_workstruct *ws ) { | 178 | void livesync_tell(struct ot_workstruct *ws) { |
179 | size_t peer_size; /* initialized in next line */ | ||
180 | ot_peer *peer_src = peer_from_peer6(&ws->peer, &peer_size); | ||
181 | sync_buffer *dest_buf = peer_size == OT_PEER_SIZE6 ? &g_v6_buf : &g_v4_buf; | ||
182 | |||
164 | pthread_mutex_lock(&g_outbuf_mutex); | 183 | pthread_mutex_lock(&g_outbuf_mutex); |
165 | 184 | ||
166 | memcpy( g_outbuf + g_outbuf_data, ws->hash, sizeof(ot_hash) ); | 185 | memcpy(dest_buf->data + dest_buf->fill, ws->hash, sizeof(ot_hash)); |
167 | memcpy( g_outbuf + g_outbuf_data + sizeof(ot_hash), &ws->peer, sizeof(ot_peer) ); | 186 | dest_buf->fill += sizeof(ot_hash); |
168 | 187 | ||
169 | g_outbuf_data += sizeof(ot_hash) + sizeof(ot_peer); | 188 | memcpy(dest_buf->data + dest_buf->fill, peer_src, peer_size); |
189 | dest_buf->fill += peer_size; | ||
170 | 190 | ||
171 | if( g_outbuf_data >= LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS ) | 191 | if (dest_buf->fill >= LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS) |
172 | livesync_issue_peersync(); | 192 | livesync_issue_peersync(dest_buf); |
173 | else | 193 | else |
174 | pthread_mutex_unlock(&g_outbuf_mutex); | 194 | pthread_mutex_unlock(&g_outbuf_mutex); |
175 | } | 195 | } |
176 | 196 | ||
177 | static void * livesync_worker( void * args ) { | 197 | static void *livesync_worker(void *args) { |
178 | struct ot_workstruct ws; | 198 | struct ot_workstruct ws; |
179 | ot_ip6 in_ip; uint16_t in_port; | 199 | ot_ip6 in_ip; |
200 | uint16_t in_port; | ||
180 | 201 | ||
181 | (void)args; | 202 | (void)args; |
182 | 203 | ||
183 | /* Initialize our "thread local storage" */ | 204 | /* Initialize our "thread local storage" */ |
184 | ws.inbuf = ws.request = malloc( LIVESYNC_INCOMING_BUFFSIZE ); | 205 | ws.inbuf = ws.request = malloc(LIVESYNC_INCOMING_BUFFSIZE); |
185 | ws.outbuf = ws.reply = 0; | 206 | ws.outbuf = ws.reply = 0; |
186 | 207 | ||
187 | memcpy( in_ip, V4mappedprefix, sizeof( V4mappedprefix ) ); | 208 | memcpy(in_ip, V4mappedprefix, sizeof(V4mappedprefix)); |
188 | 209 | ||
189 | while( 1 ) { | 210 | while (1) { |
190 | ws.request_size = socket_recv4(g_socket_in, (char*)ws.inbuf, LIVESYNC_INCOMING_BUFFSIZE, 12+(char*)in_ip, &in_port); | 211 | ws.request_size = socket_recv4(g_socket_in, (char *)ws.inbuf, LIVESYNC_INCOMING_BUFFSIZE, 12 + (char *)in_ip, &in_port); |
191 | 212 | ||
192 | /* Expect at least tracker id and packet type */ | 213 | /* Expect at least tracker id and packet type */ |
193 | if( ws.request_size <= (ssize_t)(sizeof( g_tracker_id ) + sizeof( uint32_t )) ) | 214 | if (ws.request_size <= (ssize_t)(sizeof(g_tracker_id) + sizeof(uint32_t))) |
194 | continue; | 215 | continue; |
195 | if( !accesslist_isblessed(in_ip, OT_PERMISSION_MAY_LIVESYNC)) | 216 | if (!accesslist_is_blessed(in_ip, OT_PERMISSION_MAY_LIVESYNC)) |
196 | continue; | 217 | continue; |
197 | if( !memcmp( ws.inbuf, &g_tracker_id, sizeof( g_tracker_id ) ) ) { | 218 | if (!memcmp(ws.inbuf, &g_tracker_id, sizeof(g_tracker_id))) { |
198 | /* TODO: log packet coming from ourselves */ | 219 | /* TODO: log packet coming from ourselves */ |
199 | continue; | 220 | continue; |
200 | } | 221 | } |
201 | 222 | ||
202 | switch( uint32_read_big( sizeof( g_tracker_id ) + (char *)ws.inbuf ) ) { | 223 | switch (uint32_read_big(sizeof(g_tracker_id) + (char *)ws.inbuf)) { |
203 | case OT_SYNC_PEER: | 224 | case OT_SYNC_PEER6: |
204 | livesync_handle_peersync( &ws ); | 225 | livesync_handle_peersync(&ws, OT_PEER_SIZE6); |
226 | break; | ||
227 | case OT_SYNC_PEER4: | ||
228 | livesync_handle_peersync(&ws, OT_PEER_SIZE4); | ||
205 | break; | 229 | break; |
206 | default: | 230 | default: |
207 | break; | 231 | break; |
@@ -213,4 +237,3 @@ static void * livesync_worker( void * args ) { | |||
213 | } | 237 | } |
214 | 238 | ||
215 | #endif | 239 | #endif |
216 | const char *g_version_livesync_c = "$Source$: $Revision$\n"; | ||
diff --git a/ot_livesync.h b/ot_livesync.h index d7490e5..cb28774 100644 --- a/ot_livesync.h +++ b/ot_livesync.h | |||
@@ -28,13 +28,19 @@ | |||
28 | Each tracker instance accumulates announce requests until its buffer is | 28 | Each tracker instance accumulates announce requests until its buffer is |
29 | full or a timeout is reached. Then it broadcasts its live sync packer: | 29 | full or a timeout is reached. Then it broadcasts its live sync packer: |
30 | 30 | ||
31 | packet type SYNC_LIVE | 31 | packet type SYNC_LIVE4 |
32 | [ 0x0008 0x14 info_hash | 32 | [ 0x0008 0x14 info_hash |
33 | 0x001c 0x04 peer's ipv4 address | 33 | 0x001c 0x04 peer's ipv4 address |
34 | 0x0020 0x02 peer's port | 34 | 0x0020 0x02 peer's port |
35 | 0x0024 0x02 peer flags v1 ( SEEDING = 0x80, COMPLETE = 0x40, STOPPED = 0x20 ) | 35 | 0x0024 0x02 peer flags v1 ( SEEDING = 0x80, COMPLETE = 0x40, STOPPED = 0x20 ) |
36 | ]* | 36 | ]* |
37 | 37 | ||
38 | packet type SYNC_LIVE6 | ||
39 | [ 0x0008 0x14 info_hash | ||
40 | 0x001c 0x10 peer's ipv6 address | ||
41 | 0x002c 0x02 peer's port | ||
42 | 0x002e 0x02 peer flags v1 ( SEEDING = 0x80, COMPLETE = 0x40, STOPPED = 0x20 ) | ||
43 | ]* | ||
38 | */ | 44 | */ |
39 | 45 | ||
40 | #ifdef WANT_SYNC_LIVE | 46 | #ifdef WANT_SYNC_LIVE |
@@ -45,18 +51,18 @@ void livesync_init(); | |||
45 | void livesync_deinit(); | 51 | void livesync_deinit(); |
46 | 52 | ||
47 | /* Join multicast group for listening and create sending socket */ | 53 | /* Join multicast group for listening and create sending socket */ |
48 | void livesync_bind_mcast( char *ip, uint16_t port ); | 54 | void livesync_bind_mcast(char *ip, uint16_t port); |
49 | 55 | ||
50 | /* Inform live sync about whats going on. */ | 56 | /* Inform live sync about whats going on. */ |
51 | void livesync_tell( struct ot_workstruct *ws ); | 57 | void livesync_tell(struct ot_workstruct *ws); |
52 | 58 | ||
53 | /* Tickle the live sync module from time to time, so no events get | 59 | /* Tickle the live sync module from time to time, so no events get |
54 | stuck when there's not enough traffic to fill udp packets fast | 60 | stuck when there's not enough traffic to fill udp packets fast |
55 | enough */ | 61 | enough */ |
56 | void livesync_ticker( ); | 62 | void livesync_ticker(); |
57 | 63 | ||
58 | /* Handle an incoming live sync packet */ | 64 | /* Handle an incoming live sync packet */ |
59 | void handle_livesync( const int64 sock ); | 65 | void handle_livesync(const int64 sock); |
60 | 66 | ||
61 | #else | 67 | #else |
62 | 68 | ||
@@ -16,42 +16,39 @@ | |||
16 | #include "uint32.h" | 16 | #include "uint32.h" |
17 | 17 | ||
18 | /* Opentracker */ | 18 | /* Opentracker */ |
19 | #include "trackerlogic.h" | 19 | #include "ot_iovec.h" |
20 | #include "ot_mutex.h" | 20 | #include "ot_mutex.h" |
21 | #include "ot_stats.h" | 21 | #include "ot_stats.h" |
22 | #include "trackerlogic.h" | ||
22 | 23 | ||
23 | /* #define MTX_DBG( STRING ) fprintf( stderr, STRING ) */ | 24 | /* #define MTX_DBG( STRING ) fprintf( stderr, STRING ) */ |
24 | #define MTX_DBG( STRING ) | 25 | #define MTX_DBG(STRING) |
25 | 26 | ||
26 | /* Our global all torrents list */ | 27 | /* Our global all torrents list */ |
27 | static ot_vector all_torrents[OT_BUCKET_COUNT]; | 28 | static ot_vector all_torrents[OT_BUCKET_COUNT]; |
28 | static pthread_mutex_t bucket_mutex[OT_BUCKET_COUNT]; | 29 | static pthread_mutex_t bucket_mutex[OT_BUCKET_COUNT]; |
29 | static size_t g_torrent_count; | 30 | static size_t g_torrent_count; |
30 | 31 | ||
31 | /* Self pipe from opentracker.c */ | 32 | /* Self pipe from opentracker.c */ |
32 | extern int g_self_pipe[2]; | 33 | extern int g_self_pipe[2]; |
33 | 34 | ||
34 | ot_vector *mutex_bucket_lock( int bucket ) { | 35 | ot_vector *mutex_bucket_lock(int bucket) { |
35 | pthread_mutex_lock(bucket_mutex + bucket ); | 36 | pthread_mutex_lock(bucket_mutex + bucket); |
36 | return all_torrents + bucket; | 37 | return all_torrents + bucket; |
37 | } | 38 | } |
38 | 39 | ||
39 | ot_vector *mutex_bucket_lock_by_hash( ot_hash hash ) { | 40 | ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash) { return mutex_bucket_lock(uint32_read_big((const char *)hash) >> OT_BUCKET_COUNT_SHIFT); } |
40 | return mutex_bucket_lock( uint32_read_big( (char*)hash ) >> OT_BUCKET_COUNT_SHIFT ); | ||
41 | } | ||
42 | 41 | ||
43 | void mutex_bucket_unlock( int bucket, int delta_torrentcount ) { | 42 | void mutex_bucket_unlock(int bucket, int delta_torrentcount) { |
44 | pthread_mutex_unlock(bucket_mutex + bucket); | 43 | pthread_mutex_unlock(bucket_mutex + bucket); |
45 | g_torrent_count += delta_torrentcount; | 44 | g_torrent_count += delta_torrentcount; |
46 | } | 45 | } |
47 | 46 | ||
48 | void mutex_bucket_unlock_by_hash( ot_hash hash, int delta_torrentcount ) { | 47 | void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount) { |
49 | mutex_bucket_unlock( uint32_read_big( (char*)hash ) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount ); | 48 | mutex_bucket_unlock(uint32_read_big((char *)hash) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount); |
50 | } | 49 | } |
51 | 50 | ||
52 | size_t mutex_get_torrent_count( ) { | 51 | size_t mutex_get_torrent_count() { return g_torrent_count; } |
53 | return g_torrent_count; | ||
54 | } | ||
55 | 52 | ||
56 | /* TaskQueue Magic */ | 53 | /* TaskQueue Magic */ |
57 | 54 | ||
@@ -64,32 +61,17 @@ struct ot_task { | |||
64 | struct ot_task *next; | 61 | struct ot_task *next; |
65 | }; | 62 | }; |
66 | 63 | ||
67 | static ot_taskid next_free_taskid = 1; | 64 | static ot_taskid next_free_taskid = 1; |
68 | static struct ot_task *tasklist; | 65 | static struct ot_task *tasklist; |
69 | static pthread_mutex_t tasklist_mutex; | 66 | static pthread_mutex_t tasklist_mutex; |
70 | static pthread_cond_t tasklist_being_filled; | 67 | static pthread_cond_t tasklist_being_filled; |
71 | 68 | ||
72 | int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) { | 69 | int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype) { |
73 | struct ot_task ** tmptask, * task; | 70 | struct ot_task **tmptask, *task; |
74 | 71 | ||
75 | /* Want exclusive access to tasklist */ | 72 | task = malloc(sizeof(struct ot_task)); |
76 | MTX_DBG( "pushtask locks.\n" ); | 73 | if (!task) |
77 | pthread_mutex_lock( &tasklist_mutex ); | ||
78 | MTX_DBG( "pushtask locked.\n" ); | ||
79 | |||
80 | task = malloc(sizeof( struct ot_task)); | ||
81 | if( !task ) { | ||
82 | MTX_DBG( "pushtask fail unlocks.\n" ); | ||
83 | pthread_mutex_unlock( &tasklist_mutex ); | ||
84 | MTX_DBG( "pushtask fail unlocked.\n" ); | ||
85 | return -1; | 74 | return -1; |
86 | } | ||
87 | |||
88 | /* Skip to end of list */ | ||
89 | tmptask = &tasklist; | ||
90 | while( *tmptask ) | ||
91 | tmptask = &(*tmptask)->next; | ||
92 | *tmptask = task; | ||
93 | 75 | ||
94 | task->taskid = 0; | 76 | task->taskid = 0; |
95 | task->tasktype = tasktype; | 77 | task->tasktype = tasktype; |
@@ -98,183 +80,193 @@ int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) { | |||
98 | task->iovec = NULL; | 80 | task->iovec = NULL; |
99 | task->next = 0; | 81 | task->next = 0; |
100 | 82 | ||
83 | /* Want exclusive access to tasklist */ | ||
84 | pthread_mutex_lock(&tasklist_mutex); | ||
85 | |||
86 | /* Skip to end of list */ | ||
87 | tmptask = &tasklist; | ||
88 | while (*tmptask) | ||
89 | tmptask = &(*tmptask)->next; | ||
90 | *tmptask = task; | ||
91 | |||
101 | /* Inform waiting workers and release lock */ | 92 | /* Inform waiting workers and release lock */ |
102 | MTX_DBG( "pushtask broadcasts.\n" ); | 93 | pthread_cond_broadcast(&tasklist_being_filled); |
103 | pthread_cond_broadcast( &tasklist_being_filled ); | 94 | pthread_mutex_unlock(&tasklist_mutex); |
104 | MTX_DBG( "pushtask broadcasted, mutex unlocks.\n" ); | ||
105 | pthread_mutex_unlock( &tasklist_mutex ); | ||
106 | MTX_DBG( "pushtask end mutex unlocked.\n" ); | ||
107 | return 0; | 95 | return 0; |
108 | } | 96 | } |
109 | 97 | ||
110 | void mutex_workqueue_canceltask( int64 sock ) { | 98 | void mutex_workqueue_canceltask(int64 sock) { |
111 | struct ot_task ** task; | 99 | struct ot_task **task; |
112 | 100 | ||
113 | /* Want exclusive access to tasklist */ | 101 | /* Want exclusive access to tasklist */ |
114 | MTX_DBG( "canceltask locks.\n" ); | 102 | pthread_mutex_lock(&tasklist_mutex); |
115 | pthread_mutex_lock( &tasklist_mutex ); | ||
116 | MTX_DBG( "canceltask locked.\n" ); | ||
117 | |||
118 | task = &tasklist; | ||
119 | while( *task && ( (*task)->sock != sock ) ) | ||
120 | *task = (*task)->next; | ||
121 | 103 | ||
122 | if( *task && ( (*task)->sock == sock ) ) { | 104 | for (task = &tasklist; *task; task = &((*task)->next)) |
123 | struct iovec *iovec = (*task)->iovec; | 105 | if ((*task)->sock == sock) { |
124 | struct ot_task *ptask = *task; | 106 | struct iovec *iovec = (*task)->iovec; |
125 | int i; | 107 | struct ot_task *ptask = *task; |
108 | int i; | ||
126 | 109 | ||
127 | /* Free task's iovec */ | 110 | /* Free task's iovec */ |
128 | for( i=0; i<(*task)->iovec_entries; ++i ) | 111 | for (i = 0; i < (*task)->iovec_entries; ++i) |
129 | free( iovec[i].iov_base ); | 112 | free(iovec[i].iov_base); |
130 | 113 | ||
131 | *task = (*task)->next; | 114 | *task = (*task)->next; |
132 | free( ptask ); | 115 | free(ptask); |
133 | } | 116 | break; |
117 | } | ||
134 | 118 | ||
135 | /* Release lock */ | 119 | /* Release lock */ |
136 | MTX_DBG( "canceltask unlocks.\n" ); | 120 | pthread_mutex_unlock(&tasklist_mutex); |
137 | pthread_mutex_unlock( &tasklist_mutex ); | ||
138 | MTX_DBG( "canceltask unlocked.\n" ); | ||
139 | } | 121 | } |
140 | 122 | ||
141 | ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype ) { | 123 | ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype) { |
142 | struct ot_task * task; | 124 | struct ot_task *task; |
143 | ot_taskid taskid = 0; | 125 | ot_taskid taskid = 0; |
144 | 126 | ||
145 | /* Want exclusive access to tasklist */ | 127 | /* Want exclusive access to tasklist */ |
146 | MTX_DBG( "poptask mutex locks.\n" ); | 128 | pthread_mutex_lock(&tasklist_mutex); |
147 | pthread_mutex_lock( &tasklist_mutex ); | ||
148 | MTX_DBG( "poptask mutex locked.\n" ); | ||
149 | 129 | ||
150 | while( !taskid ) { | 130 | while (!taskid) { |
151 | /* Skip to the first unassigned task this worker wants to do */ | 131 | /* Skip to the first unassigned task this worker wants to do */ |
152 | task = tasklist; | 132 | for (task = tasklist; task; task = task->next) |
153 | while( task && ( ( ( TASK_CLASS_MASK & task->tasktype ) != *tasktype ) || task->taskid ) ) | 133 | if (!task->taskid && (TASK_CLASS_MASK & task->tasktype) == *tasktype) { |
154 | task = task->next; | 134 | /* If we found an outstanding task, assign a taskid to it |
155 | 135 | and leave the loop */ | |
156 | /* If we found an outstanding task, assign a taskid to it | 136 | task->taskid = taskid = ++next_free_taskid; |
157 | and leave the loop */ | 137 | *tasktype = task->tasktype; |
158 | if( task ) { | 138 | break; |
159 | task->taskid = taskid = ++next_free_taskid; | 139 | } |
160 | *tasktype = task->tasktype; | 140 | |
161 | } else { | 141 | /* Wait until the next task is being fed */ |
162 | /* Wait until the next task is being fed */ | 142 | if (!taskid) |
163 | MTX_DBG( "poptask cond waits.\n" ); | 143 | pthread_cond_wait(&tasklist_being_filled, &tasklist_mutex); |
164 | pthread_cond_wait( &tasklist_being_filled, &tasklist_mutex ); | ||
165 | MTX_DBG( "poptask cond waited.\n" ); | ||
166 | } | ||
167 | } | 144 | } |
168 | 145 | ||
169 | /* Release lock */ | 146 | /* Release lock */ |
170 | MTX_DBG( "poptask end mutex unlocks.\n" ); | 147 | pthread_mutex_unlock(&tasklist_mutex); |
171 | pthread_mutex_unlock( &tasklist_mutex ); | ||
172 | MTX_DBG( "poptask end mutex unlocked.\n" ); | ||
173 | 148 | ||
174 | return taskid; | 149 | return taskid; |
175 | } | 150 | } |
176 | 151 | ||
177 | void mutex_workqueue_pushsuccess( ot_taskid taskid ) { | 152 | void mutex_workqueue_pushsuccess(ot_taskid taskid) { |
178 | struct ot_task ** task; | 153 | struct ot_task **task; |
179 | 154 | ||
180 | /* Want exclusive access to tasklist */ | 155 | /* Want exclusive access to tasklist */ |
181 | MTX_DBG( "pushsuccess locks.\n" ); | 156 | pthread_mutex_lock(&tasklist_mutex); |
182 | pthread_mutex_lock( &tasklist_mutex ); | 157 | |
183 | MTX_DBG( "pushsuccess locked.\n" ); | 158 | for (task = &tasklist; *task; task = &((*task)->next)) |
184 | 159 | if ((*task)->taskid == taskid) { | |
185 | task = &tasklist; | 160 | struct ot_task *ptask = *task; |
186 | while( *task && ( (*task)->taskid != taskid ) ) | 161 | *task = (*task)->next; |
187 | *task = (*task)->next; | 162 | free(ptask); |
188 | 163 | break; | |
189 | if( *task && ( (*task)->taskid == taskid ) ) { | 164 | } |
190 | struct ot_task *ptask = *task; | ||
191 | *task = (*task)->next; | ||
192 | free( ptask ); | ||
193 | } | ||
194 | 165 | ||
195 | /* Release lock */ | 166 | /* Release lock */ |
196 | MTX_DBG( "pushsuccess unlocks.\n" ); | 167 | pthread_mutex_unlock(&tasklist_mutex); |
197 | pthread_mutex_unlock( &tasklist_mutex ); | ||
198 | MTX_DBG( "pushsuccess unlocked.\n" ); | ||
199 | } | 168 | } |
200 | 169 | ||
201 | int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovec ) { | 170 | int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovec) { |
202 | struct ot_task * task; | 171 | struct ot_task *task; |
203 | const char byte = 'o'; | 172 | const char byte = 'o'; |
204 | 173 | ||
205 | /* Want exclusive access to tasklist */ | 174 | /* Want exclusive access to tasklist */ |
206 | MTX_DBG( "pushresult locks.\n" ); | 175 | pthread_mutex_lock(&tasklist_mutex); |
207 | pthread_mutex_lock( &tasklist_mutex ); | 176 | |
208 | MTX_DBG( "pushresult locked.\n" ); | 177 | for (task = tasklist; task; task = task->next) |
209 | 178 | if (task->taskid == taskid) { | |
210 | task = tasklist; | 179 | task->iovec_entries = iovec_entries; |
211 | while( task && ( task->taskid != taskid ) ) | 180 | task->iovec = iovec; |
212 | task = task->next; | 181 | task->tasktype = TASK_DONE; |
213 | 182 | break; | |
214 | if( task ) { | 183 | } |
215 | task->iovec_entries = iovec_entries; | ||
216 | task->iovec = iovec; | ||
217 | task->tasktype = TASK_DONE; | ||
218 | } | ||
219 | 184 | ||
220 | /* Release lock */ | 185 | /* Release lock */ |
221 | MTX_DBG( "pushresult unlocks.\n" ); | 186 | pthread_mutex_unlock(&tasklist_mutex); |
222 | pthread_mutex_unlock( &tasklist_mutex ); | ||
223 | MTX_DBG( "pushresult unlocked.\n" ); | ||
224 | 187 | ||
225 | io_trywrite( g_self_pipe[1], &byte, 1 ); | 188 | io_trywrite(g_self_pipe[1], &byte, 1); |
226 | 189 | ||
227 | /* Indicate whether the worker has to throw away results */ | 190 | /* Indicate whether the worker has to throw away results */ |
228 | return task ? 0 : -1; | 191 | return task ? 0 : -1; |
229 | } | 192 | } |
230 | 193 | ||
231 | int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec ) { | 194 | int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec) { |
232 | struct ot_task ** task; | 195 | struct ot_task *task; |
233 | int64 sock = -1; | 196 | const char byte = 'o'; |
234 | 197 | ||
235 | /* Want exclusive access to tasklist */ | 198 | /* Want exclusive access to tasklist */ |
236 | MTX_DBG( "popresult locks.\n" ); | 199 | pthread_mutex_lock(&tasklist_mutex); |
237 | pthread_mutex_lock( &tasklist_mutex ); | 200 | |
238 | MTX_DBG( "popresult locked.\n" ); | 201 | for (task = tasklist; task; task = task->next) |
202 | if (task->taskid == taskid) { | ||
203 | if (iovec) { | ||
204 | if (iovec_append(&task->iovec_entries, &task->iovec, iovec)) | ||
205 | task->tasktype = TASK_DONE_PARTIAL; | ||
206 | else | ||
207 | task = NULL; | ||
208 | } else | ||
209 | task->tasktype = TASK_DONE; | ||
210 | break; | ||
211 | } | ||
212 | |||
213 | /* Release lock */ | ||
214 | pthread_mutex_unlock(&tasklist_mutex); | ||
239 | 215 | ||
240 | task = &tasklist; | 216 | io_trywrite(g_self_pipe[1], &byte, 1); |
241 | while( *task && ( (*task)->tasktype != TASK_DONE ) ) | ||
242 | task = &(*task)->next; | ||
243 | 217 | ||
244 | if( *task && ( (*task)->tasktype == TASK_DONE ) ) { | 218 | /* Indicate whether the worker has to throw away results */ |
245 | struct ot_task *ptask = *task; | 219 | return task ? 0 : -1; |
220 | } | ||
246 | 221 | ||
247 | *iovec_entries = (*task)->iovec_entries; | 222 | int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovec, int *is_partial) { |
248 | *iovec = (*task)->iovec; | 223 | struct ot_task **task; |
249 | sock = (*task)->sock; | 224 | int64 sock = -1; |
250 | 225 | ||
251 | *task = (*task)->next; | 226 | *is_partial = 0; |
252 | free( ptask ); | 227 | |
253 | } | 228 | /* Want exclusive access to tasklist */ |
229 | pthread_mutex_lock(&tasklist_mutex); | ||
230 | |||
231 | for (task = &tasklist; *task; task = &((*task)->next)) | ||
232 | if (((*task)->tasktype & TASK_CLASS_MASK) == TASK_DONE) { | ||
233 | struct ot_task *ptask = *task; | ||
234 | *iovec_entries = ptask->iovec_entries; | ||
235 | *iovec = ptask->iovec; | ||
236 | sock = ptask->sock; | ||
237 | |||
238 | if ((*task)->tasktype == TASK_DONE) { | ||
239 | *task = ptask->next; | ||
240 | free(ptask); | ||
241 | } else { | ||
242 | ptask->iovec_entries = 0; | ||
243 | ptask->iovec = NULL; | ||
244 | *is_partial = 1; | ||
245 | /* Prevent task from showing up immediately again unless new data was added */ | ||
246 | (*task)->tasktype = TASK_FULLSCRAPE; | ||
247 | } | ||
248 | break; | ||
249 | } | ||
254 | 250 | ||
255 | /* Release lock */ | 251 | /* Release lock */ |
256 | MTX_DBG( "popresult unlocks.\n" ); | 252 | pthread_mutex_unlock(&tasklist_mutex); |
257 | pthread_mutex_unlock( &tasklist_mutex ); | ||
258 | MTX_DBG( "popresult unlocked.\n" ); | ||
259 | return sock; | 253 | return sock; |
260 | } | 254 | } |
261 | 255 | ||
262 | void mutex_init( ) { | 256 | void mutex_init() { |
263 | int i; | 257 | int i; |
264 | pthread_mutex_init(&tasklist_mutex, NULL); | 258 | pthread_mutex_init(&tasklist_mutex, NULL); |
265 | pthread_cond_init (&tasklist_being_filled, NULL); | 259 | pthread_cond_init(&tasklist_being_filled, NULL); |
266 | for (i=0; i < OT_BUCKET_COUNT; ++i) | 260 | for (i = 0; i < OT_BUCKET_COUNT; ++i) |
267 | pthread_mutex_init(bucket_mutex + i, NULL); | 261 | pthread_mutex_init(bucket_mutex + i, NULL); |
268 | byte_zero( all_torrents, sizeof( all_torrents ) ); | 262 | byte_zero(all_torrents, sizeof(all_torrents)); |
269 | } | 263 | } |
270 | 264 | ||
271 | void mutex_deinit( ) { | 265 | void mutex_deinit() { |
272 | int i; | 266 | int i; |
273 | for (i=0; i < OT_BUCKET_COUNT; ++i) | 267 | for (i = 0; i < OT_BUCKET_COUNT; ++i) |
274 | pthread_mutex_destroy(bucket_mutex + i); | 268 | pthread_mutex_destroy(bucket_mutex + i); |
275 | pthread_mutex_destroy(&tasklist_mutex); | 269 | pthread_mutex_destroy(&tasklist_mutex); |
276 | pthread_cond_destroy(&tasklist_being_filled); | 270 | pthread_cond_destroy(&tasklist_being_filled); |
277 | byte_zero( all_torrents, sizeof( all_torrents ) ); | 271 | byte_zero(all_torrents, sizeof(all_torrents)); |
278 | } | 272 | } |
279 | |||
280 | const char *g_version_mutex_c = "$Source$: $Revision$\n"; | ||
@@ -7,69 +7,74 @@ | |||
7 | #define OT_MUTEX_H__ | 7 | #define OT_MUTEX_H__ |
8 | 8 | ||
9 | #include <sys/uio.h> | 9 | #include <sys/uio.h> |
10 | #include "trackerlogic.h" | ||
10 | 11 | ||
11 | void mutex_init( ); | 12 | void mutex_init(void); |
12 | void mutex_deinit( ); | 13 | void mutex_deinit(void); |
13 | 14 | ||
14 | ot_vector *mutex_bucket_lock( int bucket ); | 15 | ot_vector *mutex_bucket_lock(int bucket); |
15 | ot_vector *mutex_bucket_lock_by_hash( ot_hash hash ); | 16 | ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash); |
16 | 17 | ||
17 | void mutex_bucket_unlock( int bucket, int delta_torrentcount ); | 18 | void mutex_bucket_unlock(int bucket, int delta_torrentcount); |
18 | void mutex_bucket_unlock_by_hash( ot_hash hash, int delta_torrentcount ); | 19 | void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount); |
19 | 20 | ||
20 | size_t mutex_get_torrent_count(); | 21 | size_t mutex_get_torrent_count(void); |
21 | 22 | ||
22 | typedef enum { | 23 | typedef enum { |
23 | TASK_STATS_CONNS = 0x0001, | 24 | TASK_STATS_CONNS = 0x0001, |
24 | TASK_STATS_TCP = 0x0002, | 25 | TASK_STATS_TCP = 0x0002, |
25 | TASK_STATS_UDP = 0x0003, | 26 | TASK_STATS_UDP = 0x0003, |
26 | TASK_STATS_SCRAPE = 0x0004, | 27 | TASK_STATS_SCRAPE = 0x0004, |
27 | TASK_STATS_FULLSCRAPE = 0x0005, | 28 | TASK_STATS_FULLSCRAPE = 0x0005, |
28 | TASK_STATS_TPB = 0x0006, | 29 | TASK_STATS_TPB = 0x0006, |
29 | TASK_STATS_HTTPERRORS = 0x0007, | 30 | TASK_STATS_HTTPERRORS = 0x0007, |
30 | TASK_STATS_VERSION = 0x0008, | 31 | TASK_STATS_VERSION = 0x0008, |
31 | TASK_STATS_BUSY_NETWORKS = 0x0009, | 32 | TASK_STATS_BUSY_NETWORKS = 0x0009, |
32 | TASK_STATS_RENEW = 0x000a, | 33 | TASK_STATS_RENEW = 0x000a, |
33 | TASK_STATS_SYNCS = 0x000b, | 34 | TASK_STATS_SYNCS = 0x000b, |
34 | TASK_STATS_COMPLETED = 0x000c, | 35 | TASK_STATS_COMPLETED = 0x000c, |
35 | TASK_STATS_NUMWANTS = 0x000d, | 36 | TASK_STATS_NUMWANTS = 0x000d, |
36 | 37 | ||
37 | TASK_STATS = 0x0100, /* Mask */ | 38 | TASK_STATS = 0x0100, /* Mask */ |
38 | TASK_STATS_TORRENTS = 0x0101, | 39 | TASK_STATS_TORRENTS = 0x0101, |
39 | TASK_STATS_PEERS = 0x0102, | 40 | TASK_STATS_PEERS = 0x0102, |
40 | TASK_STATS_SLASH24S = 0x0103, | 41 | TASK_STATS_SLASH24S = 0x0103, |
41 | TASK_STATS_TOP10 = 0x0104, | 42 | TASK_STATS_TOP10 = 0x0104, |
42 | TASK_STATS_TOP100 = 0x0105, | 43 | TASK_STATS_TOP100 = 0x0105, |
43 | TASK_STATS_EVERYTHING = 0x0106, | 44 | TASK_STATS_EVERYTHING = 0x0106, |
44 | TASK_STATS_FULLLOG = 0x0107, | 45 | TASK_STATS_FULLLOG = 0x0107, |
45 | TASK_STATS_WOODPECKERS = 0x0108, | 46 | TASK_STATS_WOODPECKERS = 0x0108, |
46 | 47 | ||
47 | TASK_FULLSCRAPE = 0x0200, /* Default mode */ | 48 | TASK_FULLSCRAPE = 0x0200, /* Default mode */ |
48 | TASK_FULLSCRAPE_TPB_BINARY = 0x0201, | 49 | TASK_FULLSCRAPE_TPB_BINARY = 0x0201, |
49 | TASK_FULLSCRAPE_TPB_ASCII = 0x0202, | 50 | TASK_FULLSCRAPE_TPB_ASCII = 0x0202, |
50 | TASK_FULLSCRAPE_TPB_ASCII_PLUS = 0x0203, | 51 | TASK_FULLSCRAPE_TPB_ASCII_PLUS = 0x0203, |
51 | TASK_FULLSCRAPE_TPB_URLENCODED = 0x0204, | 52 | TASK_FULLSCRAPE_TPB_URLENCODED = 0x0204, |
52 | TASK_FULLSCRAPE_TRACKERSTATE = 0x0205, | 53 | TASK_FULLSCRAPE_TRACKERSTATE = 0x0205, |
53 | 54 | ||
54 | TASK_DMEM = 0x0300, | 55 | TASK_DMEM = 0x0300, |
55 | 56 | ||
56 | TASK_DONE = 0x0f00, | 57 | TASK_DONE = 0x0f00, |
57 | 58 | TASK_DONE_PARTIAL = 0x0f01, | |
58 | TASK_FLAG_GZIP = 0x1000, | 59 | |
59 | TASK_FLAG_BZIP2 = 0x2000, | 60 | TASK_FLAG_GZIP = 0x1000, |
60 | 61 | TASK_FLAG_BZIP2 = 0x2000, | |
61 | TASK_TASK_MASK = 0x0fff, | 62 | TASK_FLAG_ZSTD = 0x4000, |
62 | TASK_CLASS_MASK = 0x0f00, | 63 | TASK_FLAG_CHUNKED = 0x8000, |
63 | TASK_FLAGS_MASK = 0xf000 | 64 | |
65 | TASK_TASK_MASK = 0x0fff, | ||
66 | TASK_CLASS_MASK = 0x0f00, | ||
67 | TASK_FLAGS_MASK = 0xf000 | ||
64 | } ot_tasktype; | 68 | } ot_tasktype; |
65 | 69 | ||
66 | typedef unsigned long ot_taskid; | 70 | typedef unsigned long ot_taskid; |
67 | 71 | ||
68 | int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ); | 72 | int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype); |
69 | void mutex_workqueue_canceltask( int64 sock ); | 73 | void mutex_workqueue_canceltask(int64 sock); |
70 | void mutex_workqueue_pushsuccess( ot_taskid taskid ); | 74 | void mutex_workqueue_pushsuccess(ot_taskid taskid); |
71 | ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype ); | 75 | ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype); |
72 | int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovector ); | 76 | int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovector); |
73 | int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovector ); | 77 | int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec); |
78 | int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovector, int *is_partial); | ||
74 | 79 | ||
75 | #endif | 80 | #endif |
diff --git a/ot_rijndael.c b/ot_rijndael.c index f468e2f..3f36bde 100644 --- a/ot_rijndael.c +++ b/ot_rijndael.c | |||
@@ -486,5 +486,3 @@ void rijndaelEncrypt128(const uint32_t rk[44], const uint8_t pt[16], uint8_t ct[ | |||
486 | rk[43]; | 486 | rk[43]; |
487 | PUTU32(ct + 12, s3); | 487 | PUTU32(ct + 12, s3); |
488 | } | 488 | } |
489 | |||
490 | const char *g_version_rijndael_c = "$Source$: $Revision$\n"; | ||
@@ -4,16 +4,16 @@ | |||
4 | $id$ */ | 4 | $id$ */ |
5 | 5 | ||
6 | /* System */ | 6 | /* System */ |
7 | #include <stdlib.h> | ||
8 | #include <arpa/inet.h> | 7 | #include <arpa/inet.h> |
9 | #include <sys/types.h> | 8 | #include <inttypes.h> |
10 | #include <sys/uio.h> | 9 | #include <pthread.h> |
11 | #include <sys/mman.h> | ||
12 | #include <stdio.h> | 10 | #include <stdio.h> |
11 | #include <stdlib.h> | ||
13 | #include <string.h> | 12 | #include <string.h> |
14 | #include <pthread.h> | 13 | #include <sys/mman.h> |
14 | #include <sys/types.h> | ||
15 | #include <sys/uio.h> | ||
15 | #include <unistd.h> | 16 | #include <unistd.h> |
16 | #include <inttypes.h> | ||
17 | #ifdef WANT_SYSLOGS | 17 | #ifdef WANT_SYSLOGS |
18 | #include <syslog.h> | 18 | #include <syslog.h> |
19 | #endif | 19 | #endif |
@@ -25,61 +25,63 @@ | |||
25 | #include "ip6.h" | 25 | #include "ip6.h" |
26 | 26 | ||
27 | /* Opentracker */ | 27 | /* Opentracker */ |
28 | #include "trackerlogic.h" | 28 | #include "ot_accesslist.h" |
29 | #include "ot_mutex.h" | ||
30 | #include "ot_iovec.h" | 29 | #include "ot_iovec.h" |
30 | #include "ot_mutex.h" | ||
31 | #include "ot_stats.h" | 31 | #include "ot_stats.h" |
32 | #include "ot_accesslist.h" | 32 | #include "trackerlogic.h" |
33 | 33 | ||
34 | #ifndef NO_FULLSCRAPE_LOGGING | 34 | #ifndef NO_FULLSCRAPE_LOGGING |
35 | #define LOG_TO_STDERR( ... ) fprintf( stderr, __VA_ARGS__ ) | 35 | #define LOG_TO_STDERR(...) fprintf(stderr, __VA_ARGS__) |
36 | #else | 36 | #else |
37 | #define LOG_TO_STDERR( ... ) | 37 | #define LOG_TO_STDERR(...) |
38 | #endif | 38 | #endif |
39 | 39 | ||
40 | /* Forward declaration */ | 40 | /* Forward declaration */ |
41 | static void stats_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ); | 41 | static void stats_make(int *iovec_entries, struct iovec **iovector, ot_tasktype mode); |
42 | #define OT_STATS_TMPSIZE 8192 | 42 | #define OT_STATS_TMPSIZE 8192 |
43 | 43 | ||
44 | /* Clumsy counters... to be rethought */ | 44 | /* Clumsy counters... to be rethought */ |
45 | static unsigned long long ot_overall_tcp_connections = 0; | 45 | static unsigned long long ot_overall_tcp_connections; |
46 | static unsigned long long ot_overall_udp_connections = 0; | 46 | static unsigned long long ot_overall_udp_connections; |
47 | static unsigned long long ot_overall_tcp_successfulannounces = 0; | 47 | static unsigned long long ot_overall_tcp_successfulannounces; |
48 | static unsigned long long ot_overall_udp_successfulannounces = 0; | 48 | static unsigned long long ot_overall_udp_successfulannounces; |
49 | static unsigned long long ot_overall_tcp_successfulscrapes = 0; | 49 | static unsigned long long ot_overall_tcp_successfulscrapes; |
50 | static unsigned long long ot_overall_udp_successfulscrapes = 0; | 50 | static unsigned long long ot_overall_udp_successfulscrapes; |
51 | static unsigned long long ot_overall_udp_connectionidmissmatches = 0; | 51 | static unsigned long long ot_overall_udp_connectionidmissmatches; |
52 | static unsigned long long ot_overall_tcp_connects = 0; | 52 | static unsigned long long ot_overall_tcp_connects; |
53 | static unsigned long long ot_overall_udp_connects = 0; | 53 | static unsigned long long ot_overall_udp_connects; |
54 | static unsigned long long ot_overall_completed = 0; | 54 | static unsigned long long ot_overall_completed; |
55 | static unsigned long long ot_full_scrape_count = 0; | 55 | static unsigned long long ot_full_scrape_count; |
56 | static unsigned long long ot_full_scrape_request_count = 0; | 56 | static unsigned long long ot_full_scrape_request_count; |
57 | static unsigned long long ot_full_scrape_size = 0; | 57 | static unsigned long long ot_full_scrape_size; |
58 | static unsigned long long ot_failed_request_counts[CODE_HTTPERROR_COUNT]; | 58 | static unsigned long long ot_failed_request_counts[CODE_HTTPERROR_COUNT]; |
59 | static char * ot_failed_request_names[] = { "302 Redirect", "400 Parse Error", "400 Invalid Parameter", "400 Invalid Parameter (compact=0)", "400 Not Modest", "402 Payment Required", "403 Access Denied", "404 Not found", "500 Internal Server Error" }; | 59 | static char *ot_failed_request_names[] = { |
60 | "302 Redirect", "400 Parse Error", "400 Invalid Parameter", "400 Invalid Parameter (compact=0)", "400 Not Modest", | ||
61 | "402 Payment Required", "403 Access Denied", "404 Not found", "500 Internal Server Error"}; | ||
60 | static unsigned long long ot_renewed[OT_PEER_TIMEOUT]; | 62 | static unsigned long long ot_renewed[OT_PEER_TIMEOUT]; |
61 | static unsigned long long ot_overall_sync_count; | 63 | static unsigned long long ot_overall_sync_count; |
62 | static unsigned long long ot_overall_stall_count; | 64 | static unsigned long long ot_overall_stall_count; |
63 | 65 | ||
64 | static time_t ot_start_time; | 66 | static time_t ot_start_time; |
65 | 67 | ||
66 | #define STATS_NETWORK_NODE_BITWIDTH 4 | 68 | #define STATS_NETWORK_NODE_BITWIDTH 4 |
67 | #define STATS_NETWORK_NODE_COUNT (1<<STATS_NETWORK_NODE_BITWIDTH) | 69 | #define STATS_NETWORK_NODE_COUNT (1 << STATS_NETWORK_NODE_BITWIDTH) |
68 | 70 | ||
69 | #define __BYTE(P,D) (((uint8_t*)P)[D/8]) | 71 | #define __BYTE(P, D) (((uint8_t *)P)[D / 8]) |
70 | #define __MSK (STATS_NETWORK_NODE_COUNT-1) | 72 | #define __MSK (STATS_NETWORK_NODE_COUNT - 1) |
71 | #define __SHFT(D) ((D^STATS_NETWORK_NODE_BITWIDTH)&STATS_NETWORK_NODE_BITWIDTH) | 73 | #define __SHFT(D) ((D ^ STATS_NETWORK_NODE_BITWIDTH) & STATS_NETWORK_NODE_BITWIDTH) |
72 | 74 | ||
73 | #define __LDR(P,D) ((__BYTE((P),(D))>>__SHFT((D)))&__MSK) | 75 | #define __LDR(P, D) ((__BYTE((P), (D)) >> __SHFT((D))) & __MSK) |
74 | #define __STR(P,D,V) __BYTE((P),(D))=(__BYTE((P),(D))&~(__MSK<<__SHFT((D))))|((V)<<__SHFT((D))) | 76 | #define __STR(P, D, V) __BYTE((P), (D)) = (__BYTE((P), (D)) & ~(__MSK << __SHFT((D)))) | ((V) << __SHFT((D))) |
75 | 77 | ||
76 | #ifdef WANT_V6 | 78 | #if 0 |
77 | #define STATS_NETWORK_NODE_MAXDEPTH (68-STATS_NETWORK_NODE_BITWIDTH) | 79 | // XXX |
78 | #define STATS_NETWORK_NODE_LIMIT (48-STATS_NETWORK_NODE_BITWIDTH) | 80 | #define STATS_NETWORK_NODE_MAXDEPTH (68 - STATS_NETWORK_NODE_BITWIDTH) |
79 | #else | 81 | #define STATS_NETWORK_NODE_LIMIT (48 - STATS_NETWORK_NODE_BITWIDTH) |
80 | #define STATS_NETWORK_NODE_MAXDEPTH (28-STATS_NETWORK_NODE_BITWIDTH) | ||
81 | #define STATS_NETWORK_NODE_LIMIT (24-STATS_NETWORK_NODE_BITWIDTH) | ||
82 | #endif | 82 | #endif |
83 | #define STATS_NETWORK_NODE_MAXDEPTH (28 - STATS_NETWORK_NODE_BITWIDTH) | ||
84 | #define STATS_NETWORK_NODE_LIMIT (24 - STATS_NETWORK_NODE_BITWIDTH) | ||
83 | 85 | ||
84 | typedef union stats_network_node stats_network_node; | 86 | typedef union stats_network_node stats_network_node; |
85 | union stats_network_node { | 87 | union stats_network_node { |
@@ -91,120 +93,125 @@ union stats_network_node { | |||
91 | static stats_network_node *stats_network_counters_root; | 93 | static stats_network_node *stats_network_counters_root; |
92 | #endif | 94 | #endif |
93 | 95 | ||
94 | static int stat_increase_network_count( stats_network_node **pnode, int depth, uintptr_t ip ) { | 96 | static int stat_increase_network_count(stats_network_node **pnode, int depth, uintptr_t ip) { |
95 | int foo = __LDR(ip,depth); | 97 | int foo = __LDR(ip, depth); |
96 | stats_network_node *node; | 98 | stats_network_node *node; |
97 | 99 | ||
98 | if( !*pnode ) { | 100 | if (!*pnode) { |
99 | *pnode = malloc( sizeof( stats_network_node ) ); | 101 | *pnode = malloc(sizeof(stats_network_node)); |
100 | if( !*pnode ) | 102 | if (!*pnode) |
101 | return -1; | 103 | return -1; |
102 | memset( *pnode, 0, sizeof( stats_network_node ) ); | 104 | memset(*pnode, 0, sizeof(stats_network_node)); |
103 | } | 105 | } |
104 | node = *pnode; | 106 | node = *pnode; |
105 | 107 | ||
106 | if( depth < STATS_NETWORK_NODE_MAXDEPTH ) | 108 | if (depth < STATS_NETWORK_NODE_MAXDEPTH) |
107 | return stat_increase_network_count( node->children + foo, depth+STATS_NETWORK_NODE_BITWIDTH, ip ); | 109 | return stat_increase_network_count(node->children + foo, depth + STATS_NETWORK_NODE_BITWIDTH, ip); |
108 | 110 | ||
109 | node->counters[ foo ]++; | 111 | node->counters[foo]++; |
110 | return 0; | 112 | return 0; |
111 | } | 113 | } |
112 | 114 | ||
113 | static int stats_shift_down_network_count( stats_network_node **node, int depth, int shift ) { | 115 | static int stats_shift_down_network_count(stats_network_node **node, int depth, int shift) { |
114 | int i, rest = 0; | 116 | int i, rest = 0; |
115 | 117 | ||
116 | if( !*node ) | 118 | if (!*node) |
117 | return 0; | 119 | return 0; |
118 | 120 | ||
119 | for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) | 121 | for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i) |
120 | if( depth < STATS_NETWORK_NODE_MAXDEPTH ) | 122 | if (depth < STATS_NETWORK_NODE_MAXDEPTH) |
121 | rest += stats_shift_down_network_count( (*node)->children + i, depth+STATS_NETWORK_NODE_BITWIDTH, shift ); | 123 | rest += stats_shift_down_network_count((*node)->children + i, depth + STATS_NETWORK_NODE_BITWIDTH, shift); |
122 | else | 124 | else |
123 | rest += (*node)->counters[i] >>= shift; | 125 | rest += (*node)->counters[i] >>= shift; |
124 | 126 | ||
125 | if( !rest ) { | 127 | if (!rest) { |
126 | free( *node ); | 128 | free(*node); |
127 | *node = NULL; | 129 | *node = NULL; |
128 | } | 130 | } |
129 | 131 | ||
130 | return rest; | 132 | return rest; |
131 | } | 133 | } |
132 | 134 | ||
133 | static size_t stats_get_highscore_networks( stats_network_node *node, int depth, ot_ip6 node_value, size_t *scores, ot_ip6 *networks, int network_count, int limit ) { | 135 | static size_t stats_get_highscore_networks(stats_network_node *node, int depth, ot_ip6 node_value, size_t *scores, ot_ip6 *networks, int network_count, |
136 | int limit) { | ||
134 | size_t score = 0; | 137 | size_t score = 0; |
135 | int i; | 138 | int i; |
136 | 139 | ||
137 | if( !node ) return 0; | 140 | if (!node) |
141 | return 0; | ||
138 | 142 | ||
139 | if( depth < limit ) { | 143 | if (depth < limit) { |
140 | for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) | 144 | for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i) |
141 | if( node->children[i] ) { | 145 | if (node->children[i]) { |
142 | __STR(node_value,depth,i); | 146 | __STR(node_value, depth, i); |
143 | score += stats_get_highscore_networks( node->children[i], depth+STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit ); | 147 | score += stats_get_highscore_networks(node->children[i], depth + STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit); |
144 | } | 148 | } |
145 | return score; | 149 | return score; |
146 | } | 150 | } |
147 | 151 | ||
148 | if( depth > limit && depth < STATS_NETWORK_NODE_MAXDEPTH ) { | 152 | if (depth > limit && depth < STATS_NETWORK_NODE_MAXDEPTH) { |
149 | for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) | 153 | for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i) |
150 | if( node->children[i] ) | 154 | if (node->children[i]) |
151 | score += stats_get_highscore_networks( node->children[i], depth+STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit ); | 155 | score += stats_get_highscore_networks(node->children[i], depth + STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit); |
152 | return score; | 156 | return score; |
153 | } | 157 | } |
154 | 158 | ||
155 | if( depth > limit && depth == STATS_NETWORK_NODE_MAXDEPTH ) { | 159 | if (depth > limit && depth == STATS_NETWORK_NODE_MAXDEPTH) { |
156 | for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) | 160 | for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i) |
157 | score += node->counters[i]; | 161 | score += node->counters[i]; |
158 | return score; | 162 | return score; |
159 | } | 163 | } |
160 | 164 | ||
161 | /* if( depth == limit ) */ | 165 | /* if( depth == limit ) */ |
162 | for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) { | 166 | for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i) { |
163 | int j=1; | 167 | int j = 1; |
164 | size_t node_score; | 168 | size_t node_score; |
165 | 169 | ||
166 | if( depth == STATS_NETWORK_NODE_MAXDEPTH ) | 170 | if (depth == STATS_NETWORK_NODE_MAXDEPTH) |
167 | node_score = node->counters[i]; | 171 | node_score = node->counters[i]; |
168 | else | 172 | else |
169 | node_score = stats_get_highscore_networks( node->children[i], depth+STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit ); | 173 | node_score = stats_get_highscore_networks(node->children[i], depth + STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit); |
170 | 174 | ||
171 | score += node_score; | 175 | score += node_score; |
172 | 176 | ||
173 | if( node_score <= scores[0] ) continue; | 177 | if (node_score <= scores[0]) |
178 | continue; | ||
174 | 179 | ||
175 | __STR(node_value,depth,i); | 180 | __STR(node_value, depth, i); |
176 | while( j < network_count && node_score > scores[j] ) ++j; | 181 | while (j < network_count && node_score > scores[j]) |
182 | ++j; | ||
177 | --j; | 183 | --j; |
178 | 184 | ||
179 | memcpy( scores, scores + 1, j * sizeof( *scores ) ); | 185 | memcpy(scores, scores + 1, j * sizeof(*scores)); |
180 | memcpy( networks, networks + 1, j * sizeof( *networks ) ); | 186 | memcpy(networks, networks + 1, j * sizeof(*networks)); |
181 | scores[ j ] = node_score; | 187 | scores[j] = node_score; |
182 | memcpy( networks + j, node_value, sizeof( *networks ) ); | 188 | memcpy(networks + j, node_value, sizeof(*networks)); |
183 | } | 189 | } |
184 | 190 | ||
185 | return score; | 191 | return score; |
186 | } | 192 | } |
187 | 193 | ||
188 | static size_t stats_return_busy_networks( char * reply, stats_network_node *tree, int amount, int limit ) { | 194 | static size_t stats_return_busy_networks(char *reply, stats_network_node *tree, int amount, int limit) { |
189 | ot_ip6 networks[amount]; | 195 | ot_ip6 networks[amount]; |
190 | ot_ip6 node_value; | 196 | ot_ip6 node_value; |
191 | size_t scores[amount]; | 197 | size_t scores[amount]; |
192 | int i; | 198 | int i; |
193 | char * r = reply; | 199 | char *r = reply; |
194 | 200 | ||
195 | memset( scores, 0, sizeof( scores ) ); | 201 | memset(scores, 0, sizeof(scores)); |
196 | memset( networks, 0, sizeof( networks ) ); | 202 | memset(networks, 0, sizeof(networks)); |
197 | memset( node_value, 0, sizeof( node_value ) ); | 203 | memset(node_value, 0, sizeof(node_value)); |
198 | 204 | ||
199 | stats_get_highscore_networks( tree, 0, node_value, scores, networks, amount, limit ); | 205 | stats_get_highscore_networks(tree, 0, node_value, scores, networks, amount, limit); |
200 | 206 | ||
201 | r += sprintf( r, "Networks, limit /%d:\n", limit+STATS_NETWORK_NODE_BITWIDTH ); | 207 | r += sprintf(r, "Networks, limit /%d:\n", limit + STATS_NETWORK_NODE_BITWIDTH); |
202 | for( i=amount-1; i>=0; --i) { | 208 | for (i = amount - 1; i >= 0; --i) { |
203 | if( scores[i] ) { | 209 | if (scores[i]) { |
204 | r += sprintf( r, "%08zd: ", scores[i] ); | 210 | r += sprintf(r, "%08zd: ", scores[i]); |
205 | #ifdef WANT_V6 | 211 | // #ifdef WANT_V6 |
206 | r += fmt_ip6c( r, networks[i] ); | 212 | r += fmt_ip6c(r, networks[i]); |
207 | #else | 213 | #if 0 |
214 | // XXX | ||
208 | r += fmt_ip4( r, networks[i]); | 215 | r += fmt_ip4( r, networks[i]); |
209 | #endif | 216 | #endif |
210 | *r++ = '\n'; | 217 | *r++ = '\n'; |
@@ -215,64 +222,66 @@ static size_t stats_return_busy_networks( char * reply, stats_network_node *tree | |||
215 | return r - reply; | 222 | return r - reply; |
216 | } | 223 | } |
217 | 224 | ||
218 | static size_t stats_slash24s_txt( char *reply, size_t amount ) { | 225 | static size_t stats_slash24s_txt(char *reply, size_t amount) { |
219 | stats_network_node *slash24s_network_counters_root = NULL; | 226 | stats_network_node *slash24s_network_counters_root = NULL; |
220 | char *r=reply; | 227 | char *r = reply; |
221 | int bucket; | 228 | int bucket; |
222 | size_t i; | 229 | size_t i, peer_size = OT_PEER_SIZE4; |
223 | 230 | ||
224 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 231 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
225 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 232 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
226 | for( i=0; i<torrents_list->size; ++i ) { | 233 | for (i = 0; i < torrents_list->size; ++i) { |
227 | ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[i] ).peer_list; | 234 | ot_peerlist *peer_list = (((ot_torrent *)(torrents_list->data))[i]).peer_list4; |
228 | ot_vector *bucket_list = &peer_list->peers; | 235 | ot_vector *bucket_list = &peer_list->peers; |
229 | int num_buckets = 1; | 236 | int num_buckets = 1; |
230 | 237 | ||
231 | if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { | 238 | if (OT_PEERLIST_HASBUCKETS(peer_list)) { |
232 | num_buckets = bucket_list->size; | 239 | num_buckets = bucket_list->size; |
233 | bucket_list = (ot_vector *)bucket_list->data; | 240 | bucket_list = (ot_vector *)bucket_list->data; |
234 | } | 241 | } |
235 | 242 | ||
236 | while( num_buckets-- ) { | 243 | while (num_buckets--) { |
237 | ot_peer *peers = (ot_peer*)bucket_list->data; | 244 | ot_peer *peers = (ot_peer *)bucket_list->data; |
238 | size_t numpeers = bucket_list->size; | 245 | size_t numpeers = bucket_list->size; |
239 | while( numpeers-- ) | 246 | while (numpeers--) { |
240 | if( stat_increase_network_count( &slash24s_network_counters_root, 0, (uintptr_t)(peers++) ) ) | 247 | if (stat_increase_network_count(&slash24s_network_counters_root, 0, (uintptr_t)(peers))) |
241 | goto bailout_unlock; | 248 | goto bailout_unlock; |
249 | peers += peer_size; | ||
250 | } | ||
242 | ++bucket_list; | 251 | ++bucket_list; |
243 | } | 252 | } |
244 | } | 253 | } |
245 | mutex_bucket_unlock( bucket, 0 ); | 254 | mutex_bucket_unlock(bucket, 0); |
246 | if( !g_opentracker_running ) | 255 | if (!g_opentracker_running) |
247 | goto bailout_error; | 256 | goto bailout_error; |
248 | } | 257 | } |
249 | 258 | ||
250 | /* The tree is built. Now analyze */ | 259 | /* The tree is built. Now analyze */ |
251 | r += stats_return_busy_networks( r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_MAXDEPTH ); | 260 | r += stats_return_busy_networks(r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_MAXDEPTH); |
252 | r += stats_return_busy_networks( r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_LIMIT ); | 261 | r += stats_return_busy_networks(r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_LIMIT); |
253 | goto success; | 262 | goto success; |
254 | 263 | ||
255 | bailout_unlock: | 264 | bailout_unlock: |
256 | mutex_bucket_unlock( bucket, 0 ); | 265 | mutex_bucket_unlock(bucket, 0); |
257 | bailout_error: | 266 | bailout_error: |
258 | r = reply; | 267 | r = reply; |
259 | success: | 268 | success: |
260 | stats_shift_down_network_count( &slash24s_network_counters_root, 0, sizeof(int)*8-1 ); | 269 | stats_shift_down_network_count(&slash24s_network_counters_root, 0, sizeof(int) * 8 - 1); |
261 | 270 | ||
262 | return r-reply; | 271 | return r - reply; |
263 | } | 272 | } |
264 | 273 | ||
265 | #ifdef WANT_SPOT_WOODPECKER | 274 | #ifdef WANT_SPOT_WOODPECKER |
266 | static stats_network_node *stats_woodpeckers_tree; | 275 | static stats_network_node *stats_woodpeckers_tree; |
267 | static pthread_mutex_t g_woodpeckers_mutex = PTHREAD_MUTEX_INITIALIZER; | 276 | static pthread_mutex_t g_woodpeckers_mutex = PTHREAD_MUTEX_INITIALIZER; |
268 | 277 | ||
269 | static size_t stats_return_woodpeckers( char * reply, int amount ) { | 278 | static size_t stats_return_woodpeckers(char *reply, int amount) { |
270 | char * r = reply; | 279 | char *r = reply; |
271 | 280 | ||
272 | pthread_mutex_lock( &g_woodpeckers_mutex ); | 281 | pthread_mutex_lock(&g_woodpeckers_mutex); |
273 | r += stats_return_busy_networks( r, stats_woodpeckers_tree, amount, STATS_NETWORK_NODE_MAXDEPTH ); | 282 | r += stats_return_busy_networks(r, stats_woodpeckers_tree, amount, STATS_NETWORK_NODE_MAXDEPTH); |
274 | pthread_mutex_unlock( &g_woodpeckers_mutex ); | 283 | pthread_mutex_unlock(&g_woodpeckers_mutex); |
275 | return r-reply; | 284 | return r - reply; |
276 | } | 285 | } |
277 | #endif | 286 | #endif |
278 | 287 | ||
@@ -282,492 +291,481 @@ typedef struct { | |||
282 | unsigned long long seed_count; | 291 | unsigned long long seed_count; |
283 | } torrent_stats; | 292 | } torrent_stats; |
284 | 293 | ||
285 | static int torrent_statter( ot_torrent *torrent, uintptr_t data ) { | 294 | static int torrent_statter(ot_torrent *torrent, uintptr_t data) { |
286 | torrent_stats *stats = (torrent_stats*)data; | 295 | torrent_stats *stats = (torrent_stats *)data; |
287 | stats->torrent_count++; | 296 | stats->torrent_count++; |
288 | stats->peer_count += torrent->peer_list->peer_count; | 297 | stats->peer_count += torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; |
289 | stats->seed_count += torrent->peer_list->seed_count; | 298 | stats->seed_count += torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; |
290 | return 0; | 299 | return 0; |
291 | } | 300 | } |
292 | 301 | ||
293 | /* Converter function from memory to human readable hex strings */ | 302 | /* Converter function from memory to human readable hex strings */ |
294 | static char*to_hex(char*d,uint8_t*s){char*m="0123456789ABCDEF";char *t=d;char*e=d+40;while(d<e){*d++=m[*s>>4];*d++=m[*s++&15];}*d=0;return t;} | 303 | static char *to_hex(char *d, uint8_t *s) { |
304 | char *m = "0123456789ABCDEF"; | ||
305 | char *t = d; | ||
306 | char *e = d + 40; | ||
307 | while (d < e) { | ||
308 | *d++ = m[*s >> 4]; | ||
309 | *d++ = m[*s++ & 15]; | ||
310 | } | ||
311 | *d = 0; | ||
312 | return t; | ||
313 | } | ||
295 | 314 | ||
296 | typedef struct { size_t val; ot_torrent * torrent; } ot_record; | 315 | typedef struct { |
316 | size_t val; | ||
317 | ot_hash hash; | ||
318 | } ot_record; | ||
297 | 319 | ||
298 | /* Fetches stats from tracker */ | 320 | /* Fetches stats from tracker */ |
299 | size_t stats_top_txt( char * reply, int amount ) { | 321 | size_t stats_top_txt(char *reply, int amount) { |
300 | size_t j; | 322 | size_t j; |
301 | ot_record top100s[100], top100c[100]; | 323 | ot_record top100s[100], top100c[100]; |
302 | char *r = reply, hex_out[42]; | 324 | char *r = reply, hex_out[42]; |
303 | int idx, bucket; | 325 | int idx, bucket; |
304 | 326 | ||
305 | if( amount > 100 ) | 327 | if (amount > 100) |
306 | amount = 100; | 328 | amount = 100; |
307 | 329 | ||
308 | byte_zero( top100s, sizeof( top100s ) ); | 330 | byte_zero(top100s, sizeof(top100s)); |
309 | byte_zero( top100c, sizeof( top100c ) ); | 331 | byte_zero(top100c, sizeof(top100c)); |
310 | 332 | ||
311 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 333 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
312 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 334 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
313 | for( j=0; j<torrents_list->size; ++j ) { | 335 | for (j = 0; j < torrents_list->size; ++j) { |
314 | ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[j] ).peer_list; | 336 | ot_torrent *torrent = (ot_torrent *)(torrents_list->data) + j; |
315 | int idx = amount - 1; while( (idx >= 0) && ( peer_list->peer_count > top100c[idx].val ) ) --idx; | 337 | size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; |
316 | if ( idx++ != amount - 1 ) { | 338 | size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; |
317 | memmove( top100c + idx + 1, top100c + idx, ( amount - 1 - idx ) * sizeof( ot_record ) ); | 339 | idx = amount - 1; |
318 | top100c[idx].val = peer_list->peer_count; | 340 | while ((idx >= 0) && (peer_count > top100c[idx].val)) |
319 | top100c[idx].torrent = (ot_torrent*)(torrents_list->data) + j; | 341 | --idx; |
342 | if (idx++ != amount - 1) { | ||
343 | memmove(top100c + idx + 1, top100c + idx, (amount - 1 - idx) * sizeof(ot_record)); | ||
344 | memcpy(&top100c[idx].hash, &torrent->hash, sizeof(ot_hash)); | ||
345 | top100c[idx].val = peer_count; | ||
320 | } | 346 | } |
321 | idx = amount - 1; while( (idx >= 0) && ( peer_list->seed_count > top100s[idx].val ) ) --idx; | 347 | idx = amount - 1; |
322 | if ( idx++ != amount - 1 ) { | 348 | while ((idx >= 0) && (seed_count > top100s[idx].val)) |
323 | memmove( top100s + idx + 1, top100s + idx, ( amount - 1 - idx ) * sizeof( ot_record ) ); | 349 | --idx; |
324 | top100s[idx].val = peer_list->seed_count; | 350 | if (idx++ != amount - 1) { |
325 | top100s[idx].torrent = (ot_torrent*)(torrents_list->data) + j; | 351 | memmove(top100s + idx + 1, top100s + idx, (amount - 1 - idx) * sizeof(ot_record)); |
352 | memcpy(&top100s[idx].hash, &torrent->hash, sizeof(ot_hash)); | ||
353 | top100s[idx].val = seed_count; | ||
326 | } | 354 | } |
327 | } | 355 | } |
328 | mutex_bucket_unlock( bucket, 0 ); | 356 | mutex_bucket_unlock(bucket, 0); |
329 | if( !g_opentracker_running ) | 357 | if (!g_opentracker_running) |
330 | return 0; | 358 | return 0; |
331 | } | 359 | } |
332 | 360 | ||
333 | r += sprintf( r, "Top %d torrents by peers:\n", amount ); | 361 | r += sprintf(r, "Top %d torrents by peers:\n", amount); |
334 | for( idx=0; idx<amount; ++idx ) | 362 | for (idx = 0; idx < amount; ++idx) |
335 | if( top100c[idx].torrent ) | 363 | if (top100c[idx].val) |
336 | r += sprintf( r, "\t%zd\t%s\n", top100c[idx].val, to_hex( hex_out, top100c[idx].torrent->hash) ); | 364 | r += sprintf(r, "\t%zd\t%s\n", top100c[idx].val, to_hex(hex_out, top100c[idx].hash)); |
337 | r += sprintf( r, "Top %d torrents by seeds:\n", amount ); | 365 | r += sprintf(r, "Top %d torrents by seeds:\n", amount); |
338 | for( idx=0; idx<amount; ++idx ) | 366 | for (idx = 0; idx < amount; ++idx) |
339 | if( top100s[idx].torrent ) | 367 | if (top100s[idx].val) |
340 | r += sprintf( r, "\t%zd\t%s\n", top100s[idx].val, to_hex( hex_out, top100s[idx].torrent->hash) ); | 368 | r += sprintf(r, "\t%zd\t%s\n", top100s[idx].val, to_hex(hex_out, top100s[idx].hash)); |
341 | 369 | ||
342 | return r - reply; | 370 | return r - reply; |
343 | } | 371 | } |
344 | 372 | ||
345 | static unsigned long events_per_time( unsigned long long events, time_t t ) { | 373 | static unsigned long events_per_time(unsigned long long events, time_t t) { return events / ((unsigned int)t ? (unsigned int)t : 1); } |
346 | return events / ( (unsigned int)t ? (unsigned int)t : 1 ); | ||
347 | } | ||
348 | 374 | ||
349 | static size_t stats_connections_mrtg( char * reply ) { | 375 | static size_t stats_connections_mrtg(char *reply) { |
350 | ot_time t = time( NULL ) - ot_start_time; | 376 | ot_time t = time(NULL) - ot_start_time; |
351 | return sprintf( reply, | 377 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", |
352 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", | 378 | ot_overall_tcp_connections + ot_overall_udp_connections, |
353 | ot_overall_tcp_connections+ot_overall_udp_connections, | 379 | ot_overall_tcp_successfulannounces + ot_overall_udp_successfulannounces + ot_overall_udp_connects, (int)t, (int)(t / 3600), |
354 | ot_overall_tcp_successfulannounces+ot_overall_udp_successfulannounces+ot_overall_udp_connects, | 380 | events_per_time(ot_overall_tcp_connections + ot_overall_udp_connections, t), |
355 | (int)t, | 381 | events_per_time(ot_overall_tcp_successfulannounces + ot_overall_udp_successfulannounces + ot_overall_udp_connects, t)); |
356 | (int)(t / 3600), | ||
357 | events_per_time( ot_overall_tcp_connections+ot_overall_udp_connections, t ), | ||
358 | events_per_time( ot_overall_tcp_successfulannounces+ot_overall_udp_successfulannounces+ot_overall_udp_connects, t ) | ||
359 | ); | ||
360 | } | 382 | } |
361 | 383 | ||
362 | static size_t stats_udpconnections_mrtg( char * reply ) { | 384 | static size_t stats_udpconnections_mrtg(char *reply) { |
363 | ot_time t = time( NULL ) - ot_start_time; | 385 | ot_time t = time(NULL) - ot_start_time; |
364 | return sprintf( reply, | 386 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker udp4 stats, %lu conns/s :: %lu success/s.", ot_overall_udp_connections, |
365 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker udp4 stats, %lu conns/s :: %lu success/s.", | 387 | ot_overall_udp_successfulannounces + ot_overall_udp_connects, (int)t, (int)(t / 3600), events_per_time(ot_overall_udp_connections, t), |
366 | ot_overall_udp_connections, | 388 | events_per_time(ot_overall_udp_successfulannounces + ot_overall_udp_connects, t)); |
367 | ot_overall_udp_successfulannounces+ot_overall_udp_connects, | ||
368 | (int)t, | ||
369 | (int)(t / 3600), | ||
370 | events_per_time( ot_overall_udp_connections, t ), | ||
371 | events_per_time( ot_overall_udp_successfulannounces+ot_overall_udp_connects, t ) | ||
372 | ); | ||
373 | } | 389 | } |
374 | 390 | ||
375 | static size_t stats_tcpconnections_mrtg( char * reply ) { | 391 | static size_t stats_tcpconnections_mrtg(char *reply) { |
376 | time_t t = time( NULL ) - ot_start_time; | 392 | time_t t = time(NULL) - ot_start_time; |
377 | return sprintf( reply, | 393 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker tcp4 stats, %lu conns/s :: %lu success/s.", ot_overall_tcp_connections, |
378 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker tcp4 stats, %lu conns/s :: %lu success/s.", | 394 | ot_overall_tcp_successfulannounces, (int)t, (int)(t / 3600), events_per_time(ot_overall_tcp_connections, t), |
379 | ot_overall_tcp_connections, | 395 | events_per_time(ot_overall_tcp_successfulannounces, t)); |
380 | ot_overall_tcp_successfulannounces, | ||
381 | (int)t, | ||
382 | (int)(t / 3600), | ||
383 | events_per_time( ot_overall_tcp_connections, t ), | ||
384 | events_per_time( ot_overall_tcp_successfulannounces, t ) | ||
385 | ); | ||
386 | } | 396 | } |
387 | 397 | ||
388 | static size_t stats_scrape_mrtg( char * reply ) { | 398 | static size_t stats_scrape_mrtg(char *reply) { |
389 | time_t t = time( NULL ) - ot_start_time; | 399 | time_t t = time(NULL) - ot_start_time; |
390 | return sprintf( reply, | 400 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker scrape stats, %lu scrape/s (tcp and udp)", ot_overall_tcp_successfulscrapes, |
391 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker scrape stats, %lu scrape/s (tcp and udp)", | 401 | ot_overall_udp_successfulscrapes, (int)t, (int)(t / 3600), |
392 | ot_overall_tcp_successfulscrapes, | 402 | events_per_time((ot_overall_tcp_successfulscrapes + ot_overall_udp_successfulscrapes), t)); |
393 | ot_overall_udp_successfulscrapes, | ||
394 | (int)t, | ||
395 | (int)(t / 3600), | ||
396 | events_per_time( (ot_overall_tcp_successfulscrapes+ot_overall_udp_successfulscrapes), t ) | ||
397 | ); | ||
398 | } | 403 | } |
399 | 404 | ||
400 | static size_t stats_fullscrapes_mrtg( char * reply ) { | 405 | static size_t stats_fullscrapes_mrtg(char *reply) { |
401 | ot_time t = time( NULL ) - ot_start_time; | 406 | ot_time t = time(NULL) - ot_start_time; |
402 | return sprintf( reply, | 407 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker full scrape stats, %lu conns/s :: %lu bytes/s.", ot_full_scrape_count * 1000, |
403 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker full scrape stats, %lu conns/s :: %lu bytes/s.", | 408 | ot_full_scrape_size, (int)t, (int)(t / 3600), events_per_time(ot_full_scrape_count, t), events_per_time(ot_full_scrape_size, t)); |
404 | ot_full_scrape_count * 1000, | ||
405 | ot_full_scrape_size, | ||
406 | (int)t, | ||
407 | (int)(t / 3600), | ||
408 | events_per_time( ot_full_scrape_count, t ), | ||
409 | events_per_time( ot_full_scrape_size, t ) | ||
410 | ); | ||
411 | } | 409 | } |
412 | 410 | ||
413 | static size_t stats_peers_mrtg( char * reply ) { | 411 | static size_t stats_peers_mrtg(char *reply) { |
414 | torrent_stats stats = {0,0,0}; | 412 | torrent_stats stats = {0, 0, 0}; |
415 | 413 | ||
416 | iterate_all_torrents( torrent_statter, (uintptr_t)&stats ); | 414 | iterate_all_torrents(torrent_statter, (uintptr_t)&stats); |
417 | 415 | ||
418 | return sprintf( reply, "%llu\n%llu\nopentracker serving %llu torrents\nopentracker", | 416 | return sprintf(reply, "%llu\n%llu\nopentracker serving %llu torrents\nopentracker", stats.peer_count, stats.seed_count, stats.torrent_count); |
419 | stats.peer_count, | ||
420 | stats.seed_count, | ||
421 | stats.torrent_count | ||
422 | ); | ||
423 | } | 417 | } |
424 | 418 | ||
425 | static size_t stats_torrents_mrtg( char * reply ) | 419 | static size_t stats_torrents_mrtg(char *reply) { |
426 | { | ||
427 | size_t torrent_count = mutex_get_torrent_count(); | 420 | size_t torrent_count = mutex_get_torrent_count(); |
428 | 421 | ||
429 | return sprintf( reply, "%zd\n%zd\nopentracker serving %zd torrents\nopentracker", | 422 | return sprintf(reply, "%zd\n%zd\nopentracker serving %zd torrents\nopentracker", torrent_count, (size_t)0, torrent_count); |
430 | torrent_count, | ||
431 | (size_t)0, | ||
432 | torrent_count | ||
433 | ); | ||
434 | } | 423 | } |
435 | 424 | ||
436 | static size_t stats_httperrors_txt ( char * reply ) { | 425 | static size_t stats_httperrors_txt(char *reply) { |
437 | return sprintf( reply, "302 RED %llu\n400 ... %llu\n400 PAR %llu\n400 COM %llu\n403 IP %llu\n404 INV %llu\n500 SRV %llu\n", | 426 | return sprintf(reply, "302 RED %llu\n400 ... %llu\n400 PAR %llu\n400 COM %llu\n403 IP %llu\n404 INV %llu\n500 SRV %llu\n", ot_failed_request_counts[0], |
438 | ot_failed_request_counts[0], ot_failed_request_counts[1], ot_failed_request_counts[2], | 427 | ot_failed_request_counts[1], ot_failed_request_counts[2], ot_failed_request_counts[3], ot_failed_request_counts[4], |
439 | ot_failed_request_counts[3], ot_failed_request_counts[4], ot_failed_request_counts[5], | 428 | ot_failed_request_counts[5], ot_failed_request_counts[6]); |
440 | ot_failed_request_counts[6] ); | ||
441 | } | 429 | } |
442 | 430 | ||
443 | static size_t stats_return_renew_bucket( char * reply ) { | 431 | static size_t stats_return_renew_bucket(char *reply) { |
444 | char *r = reply; | 432 | char *r = reply; |
445 | int i; | 433 | int i; |
446 | 434 | ||
447 | for( i=0; i<OT_PEER_TIMEOUT; ++i ) | 435 | for (i = 0; i < OT_PEER_TIMEOUT; ++i) |
448 | r+=sprintf(r,"%02i %llu\n", i, ot_renewed[i] ); | 436 | r += sprintf(r, "%02i %llu\n", i, ot_renewed[i]); |
449 | return r - reply; | 437 | return r - reply; |
450 | } | 438 | } |
451 | 439 | ||
452 | static size_t stats_return_sync_mrtg( char * reply ) { | 440 | static size_t stats_return_sync_mrtg(char *reply) { |
453 | ot_time t = time( NULL ) - ot_start_time; | 441 | ot_time t = time(NULL) - ot_start_time; |
454 | return sprintf( reply, | 442 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", ot_overall_sync_count, 0LL, (int)t, |
455 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", | 443 | (int)(t / 3600), events_per_time(ot_overall_tcp_connections + ot_overall_udp_connections, t), |
456 | ot_overall_sync_count, | 444 | events_per_time(ot_overall_tcp_successfulannounces + ot_overall_udp_successfulannounces + ot_overall_udp_connects, t)); |
457 | 0LL, | ||
458 | (int)t, | ||
459 | (int)(t / 3600), | ||
460 | events_per_time( ot_overall_tcp_connections+ot_overall_udp_connections, t ), | ||
461 | events_per_time( ot_overall_tcp_successfulannounces+ot_overall_udp_successfulannounces+ot_overall_udp_connects, t ) | ||
462 | ); | ||
463 | } | 445 | } |
464 | 446 | ||
465 | static size_t stats_return_completed_mrtg( char * reply ) { | 447 | static size_t stats_return_completed_mrtg(char *reply) { |
466 | ot_time t = time( NULL ) - ot_start_time; | 448 | ot_time t = time(NULL) - ot_start_time; |
467 | 449 | ||
468 | return sprintf( reply, | 450 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker, %lu completed/h.", ot_overall_completed, 0LL, (int)t, (int)(t / 3600), |
469 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker, %lu completed/h.", | 451 | events_per_time(ot_overall_completed, t / 3600)); |
470 | ot_overall_completed, | ||
471 | 0LL, | ||
472 | (int)t, | ||
473 | (int)(t / 3600), | ||
474 | events_per_time( ot_overall_completed, t / 3600 ) | ||
475 | ); | ||
476 | } | 452 | } |
477 | 453 | ||
478 | #ifdef WANT_LOG_NUMWANT | 454 | #ifdef WANT_LOG_NUMWANT |
479 | extern unsigned long long numwants[201]; | 455 | extern unsigned long long numwants[201]; |
480 | static size_t stats_return_numwants( char * reply ) { | 456 | static size_t stats_return_numwants(char *reply) { |
481 | char * r = reply; | 457 | char *r = reply; |
482 | int i; | 458 | int i; |
483 | for( i=0; i<=200; ++i ) | 459 | for (i = 0; i <= 200; ++i) |
484 | r += sprintf( r, "%03d => %lld\n", i, numwants[i] ); | 460 | r += sprintf(r, "%03d => %lld\n", i, numwants[i]); |
485 | return r-reply; | 461 | return r - reply; |
486 | } | 462 | } |
487 | #endif | 463 | #endif |
488 | 464 | ||
489 | #ifdef WANT_FULLLOG_NETWORKS | 465 | #ifdef WANT_FULLLOG_NETWORKS |
490 | static void stats_return_fulllog( int *iovec_entries, struct iovec **iovector, char *r ) { | 466 | static void stats_return_fulllog(int *iovec_entries, struct iovec **iovector, char *r) { |
491 | ot_log *loglist = g_logchain_first, *llnext; | 467 | ot_log *loglist = g_logchain_first, *llnext; |
492 | char * re = r + OT_STATS_TMPSIZE; | 468 | char *re = r + OT_STATS_TMPSIZE; |
493 | 469 | ||
494 | g_logchain_first = g_logchain_last = 0; | 470 | g_logchain_first = g_logchain_last = 0; |
495 | 471 | ||
496 | while( loglist ) { | 472 | while (loglist) { |
497 | if( r + ( loglist->size + 64 ) >= re ) { | 473 | if (r + (loglist->size + 64) >= re) { |
498 | r = iovec_fix_increase_or_free( iovec_entries, iovector, r, 32 * OT_STATS_TMPSIZE ); | 474 | r = iovec_fix_increase_or_free(iovec_entries, iovector, r, 32 * OT_STATS_TMPSIZE); |
499 | if( !r ) return; | 475 | if (!r) |
476 | return; | ||
500 | re = r + 32 * OT_STATS_TMPSIZE; | 477 | re = r + 32 * OT_STATS_TMPSIZE; |
501 | } | 478 | } |
502 | r += sprintf( r, "%08ld: ", loglist->time ); | 479 | r += sprintf(r, "%08ld: ", loglist->time); |
503 | r += fmt_ip6c( r, loglist->ip ); | 480 | r += fmt_ip6c(r, loglist->ip); |
504 | *r++ = '\n'; | 481 | *r++ = '\n'; |
505 | memcpy( r, loglist->data, loglist->size ); | 482 | memcpy(r, loglist->data, loglist->size); |
506 | r += loglist->size; | 483 | r += loglist->size; |
507 | *r++ = '\n'; | 484 | *r++ = '\n'; |
508 | *r++ = '*'; | 485 | *r++ = '*'; |
509 | *r++ = '\n'; | 486 | *r++ = '\n'; |
510 | *r++ = '\n'; | 487 | *r++ = '\n'; |
511 | 488 | ||
512 | llnext = loglist->next; | 489 | llnext = loglist->next; |
513 | free( loglist->data ); | 490 | free(loglist->data); |
514 | free( loglist ); | 491 | free(loglist); |
515 | loglist = llnext; | 492 | loglist = llnext; |
516 | } | 493 | } |
517 | iovec_fixlast( iovec_entries, iovector, r ); | 494 | iovec_fixlast(iovec_entries, iovector, r); |
518 | } | 495 | } |
519 | #endif | 496 | #endif |
520 | 497 | ||
521 | static size_t stats_return_everything( char * reply ) { | 498 | static size_t stats_return_everything(char *reply) { |
522 | torrent_stats stats = {0,0,0}; | 499 | torrent_stats stats = {0, 0, 0}; |
523 | int i; | 500 | int i; |
524 | char * r = reply; | 501 | char *r = reply; |
525 | 502 | ||
526 | iterate_all_torrents( torrent_statter, (uintptr_t)&stats ); | 503 | iterate_all_torrents(torrent_statter, (uintptr_t)&stats); |
527 | 504 | ||
528 | r += sprintf( r, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" ); | 505 | r += sprintf(r, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); |
529 | r += sprintf( r, "<stats>\n" ); | 506 | r += sprintf(r, "<stats>\n"); |
530 | r += sprintf( r, " <tracker_id>%" PRIu32 "</tracker_id>\n", g_tracker_id ); | 507 | r += sprintf(r, " <tracker_id>%" PRIu32 "</tracker_id>\n", g_tracker_id); |
531 | r += sprintf( r, " <version>\n" ); r += stats_return_tracker_version( r ); r += sprintf( r, " </version>\n" ); | 508 | r += sprintf(r, " <version>\n"); |
532 | r += sprintf( r, " <uptime>%llu</uptime>\n", (unsigned long long)(time( NULL ) - ot_start_time) ); | 509 | r += stats_return_tracker_version(r); |
533 | r += sprintf( r, " <torrents>\n" ); | 510 | r += sprintf(r, " </version>\n"); |
534 | r += sprintf( r, " <count_mutex>%zd</count_mutex>\n", mutex_get_torrent_count() ); | 511 | r += sprintf(r, " <uptime>%llu</uptime>\n", (unsigned long long)(time(NULL) - ot_start_time)); |
535 | r += sprintf( r, " <count_iterator>%llu</count_iterator>\n", stats.torrent_count ); | 512 | r += sprintf(r, " <torrents>\n"); |
536 | r += sprintf( r, " </torrents>\n" ); | 513 | r += sprintf(r, " <count_mutex>%zd</count_mutex>\n", mutex_get_torrent_count()); |
537 | r += sprintf( r, " <peers>\n <count>%llu</count>\n </peers>\n", stats.peer_count ); | 514 | r += sprintf(r, " <count_iterator>%llu</count_iterator>\n", stats.torrent_count); |
538 | r += sprintf( r, " <seeds>\n <count>%llu</count>\n </seeds>\n", stats.seed_count ); | 515 | r += sprintf(r, " </torrents>\n"); |
539 | r += sprintf( r, " <completed>\n <count>%llu</count>\n </completed>\n", ot_overall_completed ); | 516 | r += sprintf(r, " <peers>\n <count>%llu</count>\n </peers>\n", stats.peer_count); |
540 | r += sprintf( r, " <connections>\n" ); | 517 | r += sprintf(r, " <seeds>\n <count>%llu</count>\n </seeds>\n", stats.seed_count); |
541 | r += sprintf( r, " <tcp>\n <accept>%llu</accept>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n </tcp>\n", ot_overall_tcp_connections, ot_overall_tcp_successfulannounces, ot_overall_tcp_successfulscrapes ); | 518 | r += sprintf(r, " <completed>\n <count>%llu</count>\n </completed>\n", ot_overall_completed); |
542 | r += sprintf( r, " <udp>\n <overall>%llu</overall>\n <connect>%llu</connect>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n <missmatch>%llu</missmatch>\n </udp>\n", ot_overall_udp_connections, ot_overall_udp_connects, ot_overall_udp_successfulannounces, ot_overall_udp_successfulscrapes, ot_overall_udp_connectionidmissmatches ); | 519 | r += sprintf(r, " <connections>\n"); |
543 | r += sprintf( r, " <livesync>\n <count>%llu</count>\n </livesync>\n", ot_overall_sync_count ); | 520 | r += sprintf(r, " <tcp>\n <accept>%llu</accept>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n </tcp>\n", |
544 | r += sprintf( r, " </connections>\n" ); | 521 | ot_overall_tcp_connections, ot_overall_tcp_successfulannounces, ot_overall_tcp_successfulscrapes); |
545 | r += sprintf( r, " <debug>\n" ); | 522 | r += sprintf(r, " <udp>\n <overall>%llu</overall>\n <connect>%llu</connect>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n <missmatch>%llu</missmatch>\n </udp>\n", |
546 | r += sprintf( r, " <renew>\n" ); | 523 | ot_overall_udp_connections, ot_overall_udp_connects, ot_overall_udp_successfulannounces, ot_overall_udp_successfulscrapes, |
547 | for( i=0; i<OT_PEER_TIMEOUT; ++i ) | 524 | ot_overall_udp_connectionidmissmatches); |
548 | r += sprintf( r, " <count interval=\"%02i\">%llu</count>\n", i, ot_renewed[i] ); | 525 | r += sprintf(r, " <livesync>\n <count>%llu</count>\n </livesync>\n", ot_overall_sync_count); |
549 | r += sprintf( r, " </renew>\n" ); | 526 | r += sprintf(r, " </connections>\n"); |
550 | r += sprintf( r, " <http_error>\n" ); | 527 | r += sprintf(r, " <debug>\n"); |
551 | for( i=0; i<CODE_HTTPERROR_COUNT; ++i ) | 528 | r += sprintf(r, " <renew>\n"); |
552 | r += sprintf( r, " <count code=\"%s\">%llu</count>\n", ot_failed_request_names[i], ot_failed_request_counts[i] ); | 529 | for (i = 0; i < OT_PEER_TIMEOUT; ++i) |
553 | r += sprintf( r, " </http_error>\n" ); | 530 | r += sprintf(r, " <count interval=\"%02i\">%llu</count>\n", i, ot_renewed[i]); |
554 | r += sprintf( r, " <mutex_stall>\n <count>%llu</count>\n </mutex_stall>\n", ot_overall_stall_count ); | 531 | r += sprintf(r, " </renew>\n"); |
555 | r += sprintf( r, " </debug>\n" ); | 532 | r += sprintf(r, " <http_error>\n"); |
556 | r += sprintf( r, "</stats>" ); | 533 | for (i = 0; i < CODE_HTTPERROR_COUNT; ++i) |
534 | r += sprintf(r, " <count code=\"%s\">%llu</count>\n", ot_failed_request_names[i], ot_failed_request_counts[i]); | ||
535 | r += sprintf(r, " </http_error>\n"); | ||
536 | r += sprintf(r, " <mutex_stall>\n <count>%llu</count>\n </mutex_stall>\n", ot_overall_stall_count); | ||
537 | r += sprintf(r, " </debug>\n"); | ||
538 | r += sprintf(r, "</stats>"); | ||
557 | return r - reply; | 539 | return r - reply; |
558 | } | 540 | } |
559 | 541 | ||
560 | extern const char | 542 | size_t stats_return_tracker_version(char *reply) { |
561 | *g_version_opentracker_c, *g_version_accesslist_c, *g_version_clean_c, *g_version_fullscrape_c, *g_version_http_c, | 543 | #define QUOTE(name) #name |
562 | *g_version_iovec_c, *g_version_mutex_c, *g_version_stats_c, *g_version_udp_c, *g_version_vector_c, | 544 | #define SQUOTE(name) QUOTE(name) |
563 | *g_version_scan_urlencoded_query_c, *g_version_trackerlogic_c, *g_version_livesync_c, *g_version_rijndael_c; | 545 | return sprintf(reply, "https://erdgeist.org/gitweb/opentracker/commit/?id=" SQUOTE(GIT_VERSION) "\n"); |
564 | 546 | } | |
565 | size_t stats_return_tracker_version( char *reply ) { | 547 | |
566 | return sprintf( reply, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s", | 548 | size_t return_stats_for_tracker(char *reply, int mode, int format) { |
567 | g_version_opentracker_c, g_version_accesslist_c, g_version_clean_c, g_version_fullscrape_c, g_version_http_c, | 549 | (void)format; |
568 | g_version_iovec_c, g_version_mutex_c, g_version_stats_c, g_version_udp_c, g_version_vector_c, | 550 | switch (mode & TASK_TASK_MASK) { |
569 | g_version_scan_urlencoded_query_c, g_version_trackerlogic_c, g_version_livesync_c, g_version_rijndael_c ); | 551 | case TASK_STATS_CONNS: |
570 | } | 552 | return stats_connections_mrtg(reply); |
571 | 553 | case TASK_STATS_SCRAPE: | |
572 | size_t return_stats_for_tracker( char *reply, int mode, int format ) { | 554 | return stats_scrape_mrtg(reply); |
573 | (void) format; | 555 | case TASK_STATS_UDP: |
574 | switch( mode & TASK_TASK_MASK ) { | 556 | return stats_udpconnections_mrtg(reply); |
575 | case TASK_STATS_CONNS: | 557 | case TASK_STATS_TCP: |
576 | return stats_connections_mrtg( reply ); | 558 | return stats_tcpconnections_mrtg(reply); |
577 | case TASK_STATS_SCRAPE: | 559 | case TASK_STATS_FULLSCRAPE: |
578 | return stats_scrape_mrtg( reply ); | 560 | return stats_fullscrapes_mrtg(reply); |
579 | case TASK_STATS_UDP: | 561 | case TASK_STATS_COMPLETED: |
580 | return stats_udpconnections_mrtg( reply ); | 562 | return stats_return_completed_mrtg(reply); |
581 | case TASK_STATS_TCP: | 563 | case TASK_STATS_HTTPERRORS: |
582 | return stats_tcpconnections_mrtg( reply ); | 564 | return stats_httperrors_txt(reply); |
583 | case TASK_STATS_FULLSCRAPE: | 565 | case TASK_STATS_VERSION: |
584 | return stats_fullscrapes_mrtg( reply ); | 566 | return stats_return_tracker_version(reply); |
585 | case TASK_STATS_COMPLETED: | 567 | case TASK_STATS_RENEW: |
586 | return stats_return_completed_mrtg( reply ); | 568 | return stats_return_renew_bucket(reply); |
587 | case TASK_STATS_HTTPERRORS: | 569 | case TASK_STATS_SYNCS: |
588 | return stats_httperrors_txt( reply ); | 570 | return stats_return_sync_mrtg(reply); |
589 | case TASK_STATS_VERSION: | ||
590 | return stats_return_tracker_version( reply ); | ||
591 | case TASK_STATS_RENEW: | ||
592 | return stats_return_renew_bucket( reply ); | ||
593 | case TASK_STATS_SYNCS: | ||
594 | return stats_return_sync_mrtg( reply ); | ||
595 | #ifdef WANT_LOG_NUMWANT | 571 | #ifdef WANT_LOG_NUMWANT |
596 | case TASK_STATS_NUMWANTS: | 572 | case TASK_STATS_NUMWANTS: |
597 | return stats_return_numwants( reply ); | 573 | return stats_return_numwants(reply); |
598 | #endif | 574 | #endif |
599 | default: | 575 | default: |
600 | return 0; | 576 | return 0; |
601 | } | 577 | } |
602 | } | 578 | } |
603 | 579 | ||
604 | static void stats_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ) { | 580 | static void stats_make(int *iovec_entries, struct iovec **iovector, ot_tasktype mode) { |
605 | char *r; | 581 | char *r; |
606 | 582 | ||
607 | *iovec_entries = 0; | 583 | *iovec_entries = 0; |
608 | *iovector = NULL; | 584 | *iovector = NULL; |
609 | if( !( r = iovec_increase( iovec_entries, iovector, OT_STATS_TMPSIZE ) ) ) | 585 | if (!(r = iovec_increase(iovec_entries, iovector, OT_STATS_TMPSIZE))) |
610 | return; | 586 | return; |
611 | 587 | ||
612 | switch( mode & TASK_TASK_MASK ) { | 588 | switch (mode & TASK_TASK_MASK) { |
613 | case TASK_STATS_TORRENTS: r += stats_torrents_mrtg( r ); break; | 589 | case TASK_STATS_TORRENTS: |
614 | case TASK_STATS_PEERS: r += stats_peers_mrtg( r ); break; | 590 | r += stats_torrents_mrtg(r); |
615 | case TASK_STATS_SLASH24S: r += stats_slash24s_txt( r, 128 ); break; | 591 | break; |
616 | case TASK_STATS_TOP10: r += stats_top_txt( r, 10 ); break; | 592 | case TASK_STATS_PEERS: |
617 | case TASK_STATS_TOP100: | 593 | r += stats_peers_mrtg(r); |
618 | r = iovec_fix_increase_or_free( iovec_entries, iovector, r, 4 * OT_STATS_TMPSIZE ); | 594 | break; |
619 | if( !r ) return; | 595 | case TASK_STATS_SLASH24S: |
620 | r += stats_top_txt( r, 100 ); break; | 596 | r += stats_slash24s_txt(r, 128); |
621 | case TASK_STATS_EVERYTHING: r += stats_return_everything( r ); break; | 597 | break; |
598 | case TASK_STATS_TOP10: | ||
599 | r += stats_top_txt(r, 10); | ||
600 | break; | ||
601 | case TASK_STATS_TOP100: | ||
602 | r = iovec_fix_increase_or_free(iovec_entries, iovector, r, 4 * OT_STATS_TMPSIZE); | ||
603 | if (!r) | ||
604 | return; | ||
605 | r += stats_top_txt(r, 100); | ||
606 | break; | ||
607 | case TASK_STATS_EVERYTHING: | ||
608 | r = iovec_fix_increase_or_free(iovec_entries, iovector, r, OT_STATS_TMPSIZE + 64 * OT_PEER_TIMEOUT); | ||
609 | if (!r) | ||
610 | return; | ||
611 | r += stats_return_everything(r); | ||
612 | break; | ||
622 | #ifdef WANT_SPOT_WOODPECKER | 613 | #ifdef WANT_SPOT_WOODPECKER |
623 | case TASK_STATS_WOODPECKERS: r += stats_return_woodpeckers( r, 128 ); break; | 614 | case TASK_STATS_WOODPECKERS: |
615 | r += stats_return_woodpeckers(r, 128); | ||
616 | break; | ||
624 | #endif | 617 | #endif |
625 | #ifdef WANT_FULLLOG_NETWORKS | 618 | #ifdef WANT_FULLLOG_NETWORKS |
626 | case TASK_STATS_FULLLOG: stats_return_fulllog( iovec_entries, iovector, r ); | 619 | case TASK_STATS_FULLLOG: |
627 | return; | 620 | stats_return_fulllog(iovec_entries, iovector, r); |
621 | return; | ||
628 | #endif | 622 | #endif |
629 | default: | 623 | default: |
630 | iovec_free(iovec_entries, iovector); | 624 | iovec_free(iovec_entries, iovector); |
631 | return; | 625 | return; |
632 | } | 626 | } |
633 | iovec_fixlast( iovec_entries, iovector, r ); | 627 | iovec_fixlast(iovec_entries, iovector, r); |
634 | } | 628 | } |
635 | 629 | ||
636 | void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data ) { | 630 | void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data) { |
637 | switch( event ) { | 631 | switch (event) { |
638 | case EVENT_ACCEPT: | 632 | case EVENT_ACCEPT: |
639 | if( proto == FLAG_TCP ) ot_overall_tcp_connections++; else ot_overall_udp_connections++; | 633 | if (proto == FLAG_TCP) |
634 | ot_overall_tcp_connections++; | ||
635 | else | ||
636 | ot_overall_udp_connections++; | ||
640 | #ifdef WANT_LOG_NETWORKS | 637 | #ifdef WANT_LOG_NETWORKS |
641 | stat_increase_network_count( &stats_network_counters_root, 0, event_data ); | 638 | stat_increase_network_count(&stats_network_counters_root, 0, event_data); |
642 | #endif | 639 | #endif |
643 | break; | 640 | break; |
644 | case EVENT_ANNOUNCE: | 641 | case EVENT_ANNOUNCE: |
645 | if( proto == FLAG_TCP ) ot_overall_tcp_successfulannounces++; else ot_overall_udp_successfulannounces++; | 642 | if (proto == FLAG_TCP) |
646 | break; | 643 | ot_overall_tcp_successfulannounces++; |
647 | case EVENT_CONNECT: | 644 | else |
648 | if( proto == FLAG_TCP ) ot_overall_tcp_connects++; else ot_overall_udp_connects++; | 645 | ot_overall_udp_successfulannounces++; |
649 | break; | 646 | break; |
650 | case EVENT_COMPLETED: | 647 | case EVENT_CONNECT: |
648 | if (proto == FLAG_TCP) | ||
649 | ot_overall_tcp_connects++; | ||
650 | else | ||
651 | ot_overall_udp_connects++; | ||
652 | break; | ||
653 | case EVENT_COMPLETED: | ||
651 | #ifdef WANT_SYSLOGS | 654 | #ifdef WANT_SYSLOGS |
652 | if( event_data) { | 655 | if (event_data) { |
653 | struct ot_workstruct *ws = (struct ot_workstruct *)event_data; | 656 | struct ot_workstruct *ws = (struct ot_workstruct *)event_data; |
654 | char timestring[64]; | 657 | char timestring[64]; |
655 | char hash_hex[42], peerid_hex[42], ip_readable[64]; | 658 | char hash_hex[42], peerid_hex[42], ip_readable[64]; |
656 | struct tm time_now; | 659 | struct tm time_now; |
657 | time_t ttt; | 660 | time_t ttt; |
658 | 661 | ||
659 | time( &ttt ); | 662 | time(&ttt); |
660 | localtime_r( &ttt, &time_now ); | 663 | localtime_r(&ttt, &time_now); |
661 | strftime( timestring, sizeof( timestring ), "%FT%T%z", &time_now ); | 664 | strftime(timestring, sizeof(timestring), "%FT%T%z", &time_now); |
662 | 665 | ||
663 | to_hex( hash_hex, *ws->hash ); | 666 | to_hex(hash_hex, *ws->hash); |
664 | if( ws->peer_id ) | 667 | if (ws->peer_id) |
665 | to_hex( peerid_hex, (uint8_t*)ws->peer_id ); | 668 | to_hex(peerid_hex, (uint8_t *)ws->peer_id); |
666 | else { | 669 | else { |
667 | *peerid_hex=0; | 670 | *peerid_hex = 0; |
668 | } | 671 | } |
669 | 672 | ||
670 | #ifdef WANT_V6 | 673 | ip_readable[fmt_ip6c(ip_readable, (char *)&ws->peer)] = 0; |
671 | ip_readable[ fmt_ip6c( ip_readable, (char*)&ws->peer ) ] = 0; | 674 | #if 0 |
672 | #else | 675 | /* XXX */ |
673 | ip_readable[ fmt_ip4( ip_readable, (char*)&ws->peer ) ] = 0; | 676 | ip_readable[ fmt_ip4( ip_readable, (char*)&ws->peer ) ] = 0; |
674 | #endif | 677 | #endif |
675 | syslog( LOG_INFO, "time=%s event=completed info_hash=%s peer_id=%s ip=%s", timestring, hash_hex, peerid_hex, ip_readable ); | 678 | syslog(LOG_INFO, "time=%s event=completed info_hash=%s peer_id=%s ip=%s", timestring, hash_hex, peerid_hex, ip_readable); |
676 | } | ||
677 | #endif | ||
678 | ot_overall_completed++; | ||
679 | break; | ||
680 | case EVENT_SCRAPE: | ||
681 | if( proto == FLAG_TCP ) ot_overall_tcp_successfulscrapes++; else ot_overall_udp_successfulscrapes++; | ||
682 | break; | ||
683 | case EVENT_FULLSCRAPE: | ||
684 | ot_full_scrape_count++; | ||
685 | ot_full_scrape_size += event_data; | ||
686 | break; | ||
687 | case EVENT_FULLSCRAPE_REQUEST: | ||
688 | { | ||
689 | ot_ip6 *ip = (ot_ip6*)event_data; /* ugly hack to transfer ip to stats */ | ||
690 | char _debug[512]; | ||
691 | int off = snprintf( _debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time)/60 ); | ||
692 | off += fmt_ip6c( _debug+off, *ip ); | ||
693 | off += snprintf( _debug+off, sizeof(_debug)-off, " - FULL SCRAPE\n" ); | ||
694 | (void)write( 2, _debug, off ); | ||
695 | ot_full_scrape_request_count++; | ||
696 | } | 679 | } |
697 | break; | 680 | #endif |
698 | case EVENT_FULLSCRAPE_REQUEST_GZIP: | 681 | ot_overall_completed++; |
699 | { | 682 | break; |
700 | ot_ip6 *ip = (ot_ip6*)event_data; /* ugly hack to transfer ip to stats */ | 683 | case EVENT_SCRAPE: |
701 | char _debug[512]; | 684 | if (proto == FLAG_TCP) |
702 | int off = snprintf( _debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time)/60 ); | 685 | ot_overall_tcp_successfulscrapes++; |
703 | off += fmt_ip6c(_debug+off, *ip ); | 686 | else |
704 | off += snprintf( _debug+off, sizeof(_debug)-off, " - FULL SCRAPE\n" ); | 687 | ot_overall_udp_successfulscrapes++; |
705 | (void)write( 2, _debug, off ); | 688 | break; |
706 | ot_full_scrape_request_count++; | 689 | case EVENT_FULLSCRAPE: |
707 | } | 690 | ot_full_scrape_count++; |
708 | break; | 691 | ot_full_scrape_size += event_data; |
709 | case EVENT_FAILED: | 692 | break; |
710 | ot_failed_request_counts[event_data]++; | 693 | case EVENT_FULLSCRAPE_REQUEST: { |
711 | break; | 694 | ot_ip6 *ip = (ot_ip6 *)event_data; /* ugly hack to transfer ip to stats */ |
712 | case EVENT_RENEW: | 695 | char _debug[512]; |
713 | ot_renewed[event_data]++; | 696 | int off = snprintf(_debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time) / 60); |
714 | break; | 697 | off += fmt_ip6c(_debug + off, *ip); |
715 | case EVENT_SYNC: | 698 | off += snprintf(_debug + off, sizeof(_debug) - off, " - FULL SCRAPE\n"); |
716 | ot_overall_sync_count+=event_data; | 699 | (void)write(2, _debug, off); |
717 | break; | 700 | ot_full_scrape_request_count++; |
718 | case EVENT_BUCKET_LOCKED: | 701 | } break; |
719 | ot_overall_stall_count++; | 702 | case EVENT_FULLSCRAPE_REQUEST_GZIP: { |
720 | break; | 703 | ot_ip6 *ip = (ot_ip6 *)event_data; /* ugly hack to transfer ip to stats */ |
704 | char _debug[512]; | ||
705 | int off = snprintf(_debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time) / 60); | ||
706 | off += fmt_ip6c(_debug + off, *ip); | ||
707 | off += snprintf(_debug + off, sizeof(_debug) - off, " - FULL SCRAPE\n"); | ||
708 | (void)write(2, _debug, off); | ||
709 | ot_full_scrape_request_count++; | ||
710 | } break; | ||
711 | case EVENT_FAILED: | ||
712 | ot_failed_request_counts[event_data]++; | ||
713 | break; | ||
714 | case EVENT_RENEW: | ||
715 | ot_renewed[event_data]++; | ||
716 | break; | ||
717 | case EVENT_SYNC: | ||
718 | ot_overall_sync_count += event_data; | ||
719 | break; | ||
720 | case EVENT_BUCKET_LOCKED: | ||
721 | ot_overall_stall_count++; | ||
722 | break; | ||
721 | #ifdef WANT_SPOT_WOODPECKER | 723 | #ifdef WANT_SPOT_WOODPECKER |
722 | case EVENT_WOODPECKER: | 724 | case EVENT_WOODPECKER: |
723 | pthread_mutex_lock( &g_woodpeckers_mutex ); | 725 | pthread_mutex_lock(&g_woodpeckers_mutex); |
724 | stat_increase_network_count( &stats_woodpeckers_tree, 0, event_data ); | 726 | stat_increase_network_count(&stats_woodpeckers_tree, 0, event_data); |
725 | pthread_mutex_unlock( &g_woodpeckers_mutex ); | 727 | pthread_mutex_unlock(&g_woodpeckers_mutex); |
726 | break; | 728 | break; |
727 | #endif | 729 | #endif |
728 | case EVENT_CONNID_MISSMATCH: | 730 | case EVENT_CONNID_MISSMATCH: |
729 | ++ot_overall_udp_connectionidmissmatches; | 731 | ++ot_overall_udp_connectionidmissmatches; |
730 | default: | 732 | default: |
731 | break; | 733 | break; |
732 | } | 734 | } |
733 | } | 735 | } |
734 | 736 | ||
735 | void stats_cleanup() { | 737 | void stats_cleanup() { |
736 | #ifdef WANT_SPOT_WOODPECKER | 738 | #ifdef WANT_SPOT_WOODPECKER |
737 | pthread_mutex_lock( &g_woodpeckers_mutex ); | 739 | pthread_mutex_lock(&g_woodpeckers_mutex); |
738 | stats_shift_down_network_count( &stats_woodpeckers_tree, 0, 1 ); | 740 | stats_shift_down_network_count(&stats_woodpeckers_tree, 0, 1); |
739 | pthread_mutex_unlock( &g_woodpeckers_mutex ); | 741 | pthread_mutex_unlock(&g_woodpeckers_mutex); |
740 | #endif | 742 | #endif |
741 | } | 743 | } |
742 | 744 | ||
743 | static void * stats_worker( void * args ) { | 745 | static void *stats_worker(void *args) { |
744 | int iovec_entries; | 746 | int iovec_entries; |
745 | struct iovec *iovector; | 747 | struct iovec *iovector; |
746 | 748 | ||
747 | (void) args; | 749 | (void)args; |
748 | 750 | ||
749 | while( 1 ) { | 751 | while (1) { |
750 | ot_tasktype tasktype = TASK_STATS; | 752 | ot_tasktype tasktype = TASK_STATS; |
751 | ot_taskid taskid = mutex_workqueue_poptask( &tasktype ); | 753 | ot_taskid taskid = mutex_workqueue_poptask(&tasktype); |
752 | stats_make( &iovec_entries, &iovector, tasktype ); | 754 | stats_make(&iovec_entries, &iovector, tasktype); |
753 | if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) ) | 755 | if (mutex_workqueue_pushresult(taskid, iovec_entries, iovector)) |
754 | iovec_free( &iovec_entries, &iovector ); | 756 | iovec_free(&iovec_entries, &iovector); |
755 | } | 757 | } |
756 | return NULL; | 758 | return NULL; |
757 | } | 759 | } |
758 | 760 | ||
759 | void stats_deliver( int64 sock, int tasktype ) { | 761 | void stats_deliver(int64 sock, int tasktype) { mutex_workqueue_pushtask(sock, tasktype); } |
760 | mutex_workqueue_pushtask( sock, tasktype ); | ||
761 | } | ||
762 | 762 | ||
763 | static pthread_t thread_id; | 763 | static pthread_t thread_id; |
764 | void stats_init( ) { | 764 | void stats_init() { |
765 | ot_start_time = g_now_seconds; | 765 | ot_start_time = g_now_seconds; |
766 | pthread_create( &thread_id, NULL, stats_worker, NULL ); | 766 | pthread_create(&thread_id, NULL, stats_worker, NULL); |
767 | } | 767 | } |
768 | 768 | ||
769 | void stats_deinit( ) { | 769 | void stats_deinit() { |
770 | pthread_cancel( thread_id ); | 770 | pthread_cancel(thread_id); |
771 | } | 771 | } |
772 | |||
773 | const char *g_version_stats_c = "$Source$: $Revision$\n"; | ||
@@ -6,10 +6,12 @@ | |||
6 | #ifndef OT_STATS_H__ | 6 | #ifndef OT_STATS_H__ |
7 | #define OT_STATS_H__ | 7 | #define OT_STATS_H__ |
8 | 8 | ||
9 | #include "trackerlogic.h" | ||
10 | |||
9 | typedef enum { | 11 | typedef enum { |
10 | EVENT_ACCEPT, | 12 | EVENT_ACCEPT, |
11 | EVENT_READ, | 13 | EVENT_READ, |
12 | EVENT_CONNECT, /* UDP only */ | 14 | EVENT_CONNECT, /* UDP only */ |
13 | EVENT_ANNOUNCE, | 15 | EVENT_ANNOUNCE, |
14 | EVENT_COMPLETED, | 16 | EVENT_COMPLETED, |
15 | EVENT_RENEW, | 17 | EVENT_RENEW, |
@@ -17,7 +19,8 @@ typedef enum { | |||
17 | EVENT_SCRAPE, | 19 | EVENT_SCRAPE, |
18 | EVENT_FULLSCRAPE_REQUEST, | 20 | EVENT_FULLSCRAPE_REQUEST, |
19 | EVENT_FULLSCRAPE_REQUEST_GZIP, | 21 | EVENT_FULLSCRAPE_REQUEST_GZIP, |
20 | EVENT_FULLSCRAPE, /* TCP only */ | 22 | EVENT_FULLSCRAPE_REQUEST_ZSTD, |
23 | EVENT_FULLSCRAPE, /* TCP only */ | ||
21 | EVENT_FAILED, | 24 | EVENT_FAILED, |
22 | EVENT_BUCKET_LOCKED, | 25 | EVENT_BUCKET_LOCKED, |
23 | EVENT_WOODPECKER, | 26 | EVENT_WOODPECKER, |
@@ -38,15 +41,12 @@ enum { | |||
38 | CODE_HTTPERROR_COUNT | 41 | CODE_HTTPERROR_COUNT |
39 | }; | 42 | }; |
40 | 43 | ||
41 | void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data ); | 44 | void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data); |
42 | void stats_deliver( int64 sock, int tasktype ); | 45 | void stats_deliver(int64 sock, int tasktype); |
43 | void stats_cleanup(); | 46 | void stats_cleanup(void); |
44 | size_t return_stats_for_tracker( char *reply, int mode, int format ); | 47 | size_t return_stats_for_tracker(char *reply, int mode, int format); |
45 | size_t stats_return_tracker_version( char *reply ); | 48 | size_t stats_return_tracker_version(char *reply); |
46 | void stats_init( ); | 49 | void stats_init(void); |
47 | void stats_deinit( ); | 50 | void stats_deinit(void); |
48 | |||
49 | extern const char *g_version_rijndael_c; | ||
50 | extern const char *g_version_livesync_c; | ||
51 | 51 | ||
52 | #endif | 52 | #endif |
@@ -4,64 +4,66 @@ | |||
4 | $id$ */ | 4 | $id$ */ |
5 | 5 | ||
6 | /* System */ | 6 | /* System */ |
7 | #include <sys/types.h> | 7 | #include <pthread.h> |
8 | #include <sys/mman.h> | ||
9 | #include <sys/uio.h> | ||
10 | #include <stdio.h> | 8 | #include <stdio.h> |
11 | #include <string.h> | 9 | #include <string.h> |
12 | #include <pthread.h> | 10 | #include <sys/mman.h> |
11 | #include <sys/types.h> | ||
12 | #include <sys/uio.h> | ||
13 | 13 | ||
14 | /* Libowfat */ | 14 | /* Libowfat */ |
15 | #include "scan.h" | ||
16 | #include "byte.h" | 15 | #include "byte.h" |
17 | #include "io.h" | 16 | #include "io.h" |
17 | #include "scan.h" | ||
18 | 18 | ||
19 | /* Opentracker */ | 19 | /* Opentracker */ |
20 | #include "trackerlogic.h" | 20 | #include "ot_iovec.h" |
21 | #include "ot_mutex.h" | 21 | #include "ot_mutex.h" |
22 | #include "ot_sync.h" | ||
23 | #include "ot_stats.h" | 22 | #include "ot_stats.h" |
24 | #include "ot_iovec.h" | 23 | #include "ot_sync.h" |
24 | #include "trackerlogic.h" | ||
25 | 25 | ||
26 | #ifdef WANT_SYNC_BATCH | 26 | #ifdef WANT_SYNC_BATCH |
27 | 27 | ||
28 | #define OT_SYNC_CHUNK_SIZE (512*1024) | 28 | #define OT_SYNC_CHUNK_SIZE (512 * 1024) |
29 | 29 | ||
30 | /* Import Changeset from an external authority | 30 | /* Import Changeset from an external authority |
31 | format: d4:syncd[..]ee | 31 | format: d4:syncd[..]ee |
32 | [..]: ( 20:01234567890abcdefghij16:XXXXYYYY )+ | 32 | [..]: ( 20:01234567890abcdefghij16:XXXXYYYY )+ |
33 | */ | 33 | */ |
34 | int add_changeset_to_tracker( uint8_t *data, size_t len ) { | 34 | int add_changeset_to_tracker(uint8_t *data, size_t len) { |
35 | ot_hash *hash; | 35 | ot_hash *hash; |
36 | uint8_t *end = data + len; | 36 | uint8_t *end = data + len; |
37 | unsigned long peer_count; | 37 | unsigned long peer_count; |
38 | 38 | ||
39 | /* We do know, that the string is \n terminated, so it cant | 39 | /* We do know, that the string is \n terminated, so it cant |
40 | overflow */ | 40 | overflow */ |
41 | if( byte_diff( data, 8, "d4:syncd" ) ) return -1; | 41 | if (byte_diff(data, 8, "d4:syncd")) |
42 | return -1; | ||
42 | data += 8; | 43 | data += 8; |
43 | 44 | ||
44 | while( 1 ) { | 45 | while (1) { |
45 | if( byte_diff( data, 3, "20:" ) ) { | 46 | if (byte_diff(data, 3, "20:")) { |
46 | if( byte_diff( data, 2, "ee" ) ) | 47 | if (byte_diff(data, 2, "ee")) |
47 | return -1; | 48 | return -1; |
48 | return 0; | 49 | return 0; |
49 | } | 50 | } |
50 | data += 3; | 51 | data += 3; |
51 | hash = (ot_hash*)data; | 52 | hash = (ot_hash *)data; |
52 | data += sizeof( ot_hash ); | 53 | data += sizeof(ot_hash); |
53 | 54 | ||
54 | /* Scan string length indicator */ | 55 | /* Scan string length indicator */ |
55 | data += ( len = scan_ulong( (char*)data, &peer_count ) ); | 56 | data += (len = scan_ulong((char *)data, &peer_count)); |
56 | 57 | ||
57 | /* If no long was scanned, it is not divisible by 8, it is not | 58 | /* If no long was scanned, it is not divisible by 8, it is not |
58 | followed by a colon or claims to need to much memory, we fail */ | 59 | followed by a colon or claims to need to much memory, we fail */ |
59 | if( !len || !peer_count || ( peer_count & 7 ) || ( *data++ != ':' ) || ( data + peer_count > end ) ) | 60 | if (!len || !peer_count || (peer_count & 7) || (*data++ != ':') || (data + peer_count > end)) |
60 | return -1; | 61 | return -1; |
61 | 62 | ||
62 | while( peer_count > 0 ) { | 63 | while (peer_count > 0) { |
63 | add_peer_to_torrent( hash, (ot_peer*)data, 1 ); | 64 | add_peer_to_torrent(hash, (ot_peer *)data, 1); |
64 | data += 8; peer_count -= 8; | 65 | data += 8; |
66 | peer_count -= 8; | ||
65 | } | 67 | } |
66 | } | 68 | } |
67 | return 0; | 69 | return 0; |
@@ -70,80 +72,86 @@ int add_changeset_to_tracker( uint8_t *data, size_t len ) { | |||
70 | /* Proposed output format | 72 | /* Proposed output format |
71 | d4:syncd20:<info_hash>8*N:(xxxxyyyy)*Nee | 73 | d4:syncd20:<info_hash>8*N:(xxxxyyyy)*Nee |
72 | */ | 74 | */ |
73 | static void sync_make( int *iovec_entries, struct iovec **iovector ) { | 75 | static void sync_make(int *iovec_entries, struct iovec **iovector) { |
74 | int bucket; | 76 | int bucket; |
75 | char *r, *re; | 77 | char *r, *re; |
76 | 78 | ||
77 | /* Setup return vector... */ | 79 | /* Setup return vector... */ |
78 | *iovec_entries = 0; | 80 | *iovec_entries = 0; |
79 | *iovector = NULL; | 81 | *iovector = NULL; |
80 | if( !( r = iovec_increase( iovec_entries, iovector, OT_SYNC_CHUNK_SIZE ) ) ) | 82 | if (!(r = iovec_increase(iovec_entries, iovector, OT_SYNC_CHUNK_SIZE))) |
81 | return; | 83 | return; |
82 | 84 | ||
83 | /* ... and pointer to end of current output buffer. | 85 | /* ... and pointer to end of current output buffer. |
84 | This works as a low watermark */ | 86 | This works as a low watermark */ |
85 | re = r + OT_SYNC_CHUNK_SIZE; | 87 | re = r + OT_SYNC_CHUNK_SIZE; |
86 | 88 | ||
87 | memmove( r, "d4:syncd", 8 ); r += 8; | 89 | memmove(r, "d4:syncd", 8); |
90 | r += 8; | ||
88 | 91 | ||
89 | /* For each bucket... */ | 92 | /* For each bucket... */ |
90 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 93 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
91 | /* Get exclusive access to that bucket */ | 94 | /* Get exclusive access to that bucket */ |
92 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 95 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
93 | size_t tor_offset; | 96 | size_t tor_offset; |
94 | 97 | ||
95 | /* For each torrent in this bucket.. */ | 98 | /* For each torrent in this bucket.. */ |
96 | for( tor_offset=0; tor_offset<torrents_list->size; ++tor_offset ) { | 99 | for (tor_offset = 0; tor_offset < torrents_list->size; ++tor_offset) { |
97 | /* Address torrents members */ | 100 | /* Address torrents members */ |
98 | ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[tor_offset] ).peer_list; | 101 | ot_peerlist *peer_list = (((ot_torrent *)(torrents_list->data))[tor_offset]).peer_list; |
99 | ot_hash *hash =&( ((ot_torrent*)(torrents_list->data))[tor_offset] ).hash; | 102 | ot_hash *hash = &(((ot_torrent *)(torrents_list->data))[tor_offset]).hash; |
100 | const size_t byte_count = sizeof(ot_peer) * peer_list->changeset.size; | 103 | const size_t byte_count = sizeof(ot_peer) * peer_list->changeset.size; |
101 | 104 | ||
102 | /* If we reached our low watermark in buffer... */ | 105 | /* If we reached our low watermark in buffer... */ |
103 | if( re - r <= (ssize_t)(/* strlen( "20:" ) == */ 3 + sizeof( ot_hash ) + /* strlen_max( "%zd" ) == */ 12 + byte_count ) ) { | 106 | if (re - r <= (ssize_t)(/* strlen( "20:" ) == */ 3 + sizeof(ot_hash) + /* strlen_max( "%zd" ) == */ 12 + byte_count)) { |
104 | 107 | ||
105 | /* Allocate a fresh output buffer at the end of our buffers list | 108 | /* Allocate a fresh output buffer at the end of our buffers list |
106 | release bucket and return, if that fails */ | 109 | release bucket and return, if that fails */ |
107 | if( !( r = iovec_fix_increase_or_free( iovec_entries, iovector, r, OT_SYNC_CHUNK_SIZE ) ) ) | 110 | if (!(r = iovec_fix_increase_or_free(iovec_entries, iovector, r, OT_SYNC_CHUNK_SIZE))) |
108 | return mutex_bucket_unlock( bucket ); | 111 | return mutex_bucket_unlock(bucket); |
109 | 112 | ||
110 | /* Adjust new end of output buffer */ | 113 | /* Adjust new end of output buffer */ |
111 | re = r + OT_SYNC_CHUNK_SIZE; | 114 | re = r + OT_SYNC_CHUNK_SIZE; |
112 | } | 115 | } |
113 | 116 | ||
114 | *r++ = '2'; *r++ = '0'; *r++ = ':'; | 117 | *r++ = '2'; |
115 | memmove( r, hash, sizeof( ot_hash ) ); r += sizeof( ot_hash ); | 118 | *r++ = '0'; |
116 | r += sprintf( r, "%zd:", byte_count ); | 119 | *r++ = ':'; |
117 | memmove( r, peer_list->changeset.data, byte_count ); r += byte_count; | 120 | memmove(r, hash, sizeof(ot_hash)); |
121 | r += sizeof(ot_hash); | ||
122 | r += sprintf(r, "%zd:", byte_count); | ||
123 | memmove(r, peer_list->changeset.data, byte_count); | ||
124 | r += byte_count; | ||
118 | } | 125 | } |
119 | 126 | ||
120 | /* All torrents done: release lock on currenct bucket */ | 127 | /* All torrents done: release lock on currenct bucket */ |
121 | mutex_bucket_unlock( bucket ); | 128 | mutex_bucket_unlock(bucket); |
122 | } | 129 | } |
123 | 130 | ||
124 | /* Close bencoded sync dictionary */ | 131 | /* Close bencoded sync dictionary */ |
125 | *r++='e'; *r++='e'; | 132 | *r++ = 'e'; |
133 | *r++ = 'e'; | ||
126 | 134 | ||
127 | /* Release unused memory in current output buffer */ | 135 | /* Release unused memory in current output buffer */ |
128 | iovec_fixlast( iovec_entries, iovector, r ); | 136 | iovec_fixlast(iovec_entries, iovector, r); |
129 | } | 137 | } |
130 | 138 | ||
131 | /* This is the entry point into this worker thread | 139 | /* This is the entry point into this worker thread |
132 | It grabs tasks from mutex_tasklist and delivers results back | 140 | It grabs tasks from mutex_tasklist and delivers results back |
133 | */ | 141 | */ |
134 | static void * sync_worker( void * args) { | 142 | static void *sync_worker(void *args) { |
135 | int iovec_entries; | 143 | int iovec_entries; |
136 | struct iovec *iovector; | 144 | struct iovec *iovector; |
137 | 145 | ||
138 | args = args; | 146 | args = args; |
139 | 147 | ||
140 | while( 1 ) { | 148 | while (1) { |
141 | ot_tasktype tasktype = TASK_SYNC_OUT; | 149 | ot_tasktype tasktype = TASK_SYNC_OUT; |
142 | ot_taskid taskid = mutex_workqueue_poptask( &tasktype ); | 150 | ot_taskid taskid = mutex_workqueue_poptask(&tasktype); |
143 | sync_make( &iovec_entries, &iovector ); | 151 | sync_make(&iovec_entries, &iovector); |
144 | stats_issue_event( EVENT_SYNC_OUT, FLAG_TCP, iovec_length( &iovec_entries, &iovector) ); | 152 | stats_issue_event(EVENT_SYNC_OUT, FLAG_TCP, iovec_length(&iovec_entries, &iovector)); |
145 | if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) ) | 153 | if (mutex_workqueue_pushresult(taskid, iovec_entries, iovector)) |
146 | iovec_free( &iovec_entries, &iovector ); | 154 | iovec_free(&iovec_entries, &iovector); |
147 | } | 155 | } |
148 | return NULL; | 156 | return NULL; |
149 | } | 157 | } |
@@ -162,5 +170,3 @@ void sync_deliver( int64 socket ) { | |||
162 | } | 170 | } |
163 | 171 | ||
164 | #endif | 172 | #endif |
165 | |||
166 | const char *g_version_sync_c = "$Source$: $Revision$\n"; | ||
@@ -9,11 +9,11 @@ | |||
9 | #ifdef WANT_SYNC_BATCH | 9 | #ifdef WANT_SYNC_BATCH |
10 | enum { SYNC_IN, SYNC_OUT }; | 10 | enum { SYNC_IN, SYNC_OUT }; |
11 | 11 | ||
12 | void sync_init( ); | 12 | void sync_init(); |
13 | void sync_deinit( ); | 13 | void sync_deinit(); |
14 | void sync_deliver( int64 socket ); | 14 | void sync_deliver(int64 socket); |
15 | 15 | ||
16 | int add_changeset_to_tracker( uint8_t *data, size_t len ); | 16 | int add_changeset_to_tracker(uint8_t *data, size_t len); |
17 | #else | 17 | #else |
18 | 18 | ||
19 | #define sync_init() | 19 | #define sync_init() |
@@ -4,30 +4,31 @@ | |||
4 | $id$ */ | 4 | $id$ */ |
5 | 5 | ||
6 | /* System */ | 6 | /* System */ |
7 | #include <stdlib.h> | ||
8 | #include <pthread.h> | ||
9 | #include <string.h> | ||
10 | #include <arpa/inet.h> | 7 | #include <arpa/inet.h> |
8 | #include <pthread.h> | ||
11 | #include <stdio.h> | 9 | #include <stdio.h> |
10 | #include <stdlib.h> | ||
11 | #include <string.h> | ||
12 | 12 | ||
13 | /* Libowfat */ | 13 | /* Libowfat */ |
14 | #include "socket.h" | ||
15 | #include "io.h" | 14 | #include "io.h" |
15 | #include "ip6.h" | ||
16 | #include "socket.h" | ||
16 | 17 | ||
17 | /* Opentracker */ | 18 | /* Opentracker */ |
18 | #include "trackerlogic.h" | ||
19 | #include "ot_udp.h" | ||
20 | #include "ot_stats.h" | ||
21 | #include "ot_rijndael.h" | 19 | #include "ot_rijndael.h" |
20 | #include "ot_stats.h" | ||
21 | #include "ot_udp.h" | ||
22 | #include "trackerlogic.h" | ||
22 | 23 | ||
23 | #if 0 | 24 | #if 0 |
24 | static const uint8_t g_static_connid[8] = { 0x23, 0x42, 0x05, 0x17, 0xde, 0x41, 0x50, 0xff }; | 25 | static const uint8_t g_static_connid[8] = { 0x23, 0x42, 0x05, 0x17, 0xde, 0x41, 0x50, 0xff }; |
25 | #endif | 26 | #endif |
26 | static uint32_t g_rijndael_round_key[44] = {0}; | 27 | static uint32_t g_rijndael_round_key[44] = {0}; |
27 | static uint32_t g_key_of_the_hour[2] = {0}; | 28 | static uint32_t g_key_of_the_hour[2] = {0}; |
28 | static ot_time g_hour_of_the_key; | 29 | static ot_time g_hour_of_the_key; |
29 | 30 | ||
30 | static void udp_generate_rijndael_round_key() { | 31 | static void udp_generate_rijndael_round_key() { |
31 | uint32_t key[16]; | 32 | uint32_t key[16]; |
32 | #ifdef WANT_ARC4RANDOM | 33 | #ifdef WANT_ARC4RANDOM |
33 | arc4random_buf(&key[0], sizeof(key)); | 34 | arc4random_buf(&key[0], sizeof(key)); |
@@ -37,7 +38,7 @@ static void udp_generate_rijndael_round_key() { | |||
37 | key[2] = random(); | 38 | key[2] = random(); |
38 | key[3] = random(); | 39 | key[3] = random(); |
39 | #endif | 40 | #endif |
40 | rijndaelKeySetupEnc128( g_rijndael_round_key, (uint8_t*)key ); | 41 | rijndaelKeySetupEnc128(g_rijndael_round_key, (uint8_t *)key); |
41 | 42 | ||
42 | #ifdef WANT_ARC4RANDOM | 43 | #ifdef WANT_ARC4RANDOM |
43 | g_key_of_the_hour[0] = arc4random(); | 44 | g_key_of_the_hour[0] = arc4random(); |
@@ -48,180 +49,188 @@ static void udp_generate_rijndael_round_key() { | |||
48 | } | 49 | } |
49 | 50 | ||
50 | /* Generate current and previous connection id for ip */ | 51 | /* Generate current and previous connection id for ip */ |
51 | static void udp_make_connectionid( uint32_t connid[2], const ot_ip6 remoteip, int age ) { | 52 | static void udp_make_connectionid(uint32_t connid[2], const ot_ip6 remoteip, int age) { |
52 | uint32_t plain[4], crypt[4]; | 53 | uint32_t plain[4], crypt[4]; |
53 | int i; | 54 | int i; |
54 | if( g_now_minutes + 60 > g_hour_of_the_key ) { | 55 | if (g_now_minutes + 60 > g_hour_of_the_key) { |
55 | g_hour_of_the_key = g_now_minutes; | 56 | g_hour_of_the_key = g_now_minutes; |
56 | g_key_of_the_hour[1] = g_key_of_the_hour[0]; | 57 | g_key_of_the_hour[1] = g_key_of_the_hour[0]; |
57 | #ifdef WANT_ARC4RANDOM | 58 | #ifdef WANT_ARC4RANDOM |
58 | g_key_of_the_hour[0] = arc4random(); | 59 | g_key_of_the_hour[0] = arc4random(); |
59 | #else | 60 | #else |
60 | g_key_of_the_hour[0] = random(); | 61 | g_key_of_the_hour[0] = random(); |
61 | #endif | 62 | #endif |
62 | } | 63 | } |
63 | 64 | ||
64 | memcpy( plain, remoteip, sizeof( plain ) ); | 65 | memcpy(plain, remoteip, sizeof(plain)); |
65 | for( i=0; i<4; ++i ) plain[i] ^= g_key_of_the_hour[age]; | 66 | for (i = 0; i < 4; ++i) |
66 | rijndaelEncrypt128( g_rijndael_round_key, (uint8_t*)remoteip, (uint8_t*)crypt ); | 67 | plain[i] ^= g_key_of_the_hour[age]; |
68 | rijndaelEncrypt128(g_rijndael_round_key, (uint8_t *)remoteip, (uint8_t *)crypt); | ||
67 | connid[0] = crypt[0] ^ crypt[1]; | 69 | connid[0] = crypt[0] ^ crypt[1]; |
68 | connid[1] = crypt[2] ^ crypt[3]; | 70 | connid[1] = crypt[2] ^ crypt[3]; |
69 | } | 71 | } |
70 | 72 | ||
71 | /* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */ | 73 | /* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */ |
72 | int handle_udp6( int64 serversocket, struct ot_workstruct *ws ) { | 74 | int handle_udp6(int64 serversocket, struct ot_workstruct *ws) { |
73 | ot_ip6 remoteip; | 75 | ot_ip6 remoteip; |
74 | uint32_t *inpacket = (uint32_t*)ws->inbuf; | 76 | uint32_t *inpacket = (uint32_t *)ws->inbuf; |
75 | uint32_t *outpacket = (uint32_t*)ws->outbuf; | 77 | uint32_t *outpacket = (uint32_t *)ws->outbuf; |
76 | uint32_t numwant, left, event, scopeid; | 78 | uint32_t left, event, scopeid; |
77 | uint32_t connid[2]; | 79 | uint32_t connid[2]; |
78 | uint32_t action; | 80 | uint32_t action; |
79 | uint16_t port, remoteport; | 81 | uint16_t port, remoteport; |
80 | size_t byte_count, scrape_count; | 82 | size_t byte_count, scrape_count; |
81 | 83 | ||
82 | byte_count = socket_recv6( serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid ); | 84 | byte_count = socket_recv6(serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid); |
83 | if( !byte_count ) return 0; | 85 | if (!byte_count) |
84 | 86 | return 0; | |
85 | stats_issue_event( EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip ); | 87 | |
86 | stats_issue_event( EVENT_READ, FLAG_UDP, byte_count ); | 88 | stats_issue_event(EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip); |
89 | stats_issue_event(EVENT_READ, FLAG_UDP, byte_count); | ||
87 | 90 | ||
88 | /* Minimum udp tracker packet size, also catches error */ | 91 | /* Minimum udp tracker packet size, also catches error */ |
89 | if( byte_count < 16 ) | 92 | if (byte_count < 16) |
90 | return 1; | 93 | return 1; |
91 | 94 | ||
92 | /* Get action to take. Ignore error messages and broken packets */ | 95 | /* Get action to take. Ignore error messages and broken packets */ |
93 | action = ntohl( inpacket[2] ); | 96 | action = ntohl(inpacket[2]); |
94 | if( action > 2 ) | 97 | if (action > 2) |
95 | return 1; | 98 | return 1; |
96 | 99 | ||
97 | /* Generate the connection id we give out and expect to and from | 100 | /* Generate the connection id we give out and expect to and from |
98 | the requesting ip address, this prevents udp spoofing */ | 101 | the requesting ip address, this prevents udp spoofing */ |
99 | udp_make_connectionid( connid, remoteip, 0 ); | 102 | udp_make_connectionid(connid, remoteip, 0); |
100 | 103 | ||
101 | /* Initialise hash pointer */ | 104 | /* Initialise hash pointer */ |
102 | ws->hash = NULL; | 105 | ws->hash = NULL; |
103 | ws->peer_id = NULL; | 106 | ws->peer_id = NULL; |
104 | 107 | ||
105 | /* If action is not 0 (connect), then we expect the derived | 108 | /* If action is not 0 (connect), then we expect the derived |
106 | connection id in first 64 bit */ | 109 | connection id in first 64 bit */ |
107 | if( ( action > 0 ) && ( inpacket[0] != connid[0] || inpacket[1] != connid[1] ) ) { | 110 | if ((action > 0) && (inpacket[0] != connid[0] || inpacket[1] != connid[1])) { |
108 | /* If connection id does not match, try the one that was | 111 | /* If connection id does not match, try the one that was |
109 | valid in the previous hour. Only if this also does not | 112 | valid in the previous hour. Only if this also does not |
110 | match, return an error packet */ | 113 | match, return an error packet */ |
111 | udp_make_connectionid( connid, remoteip, 1 ); | 114 | udp_make_connectionid(connid, remoteip, 1); |
112 | if( inpacket[0] != connid[0] || inpacket[1] != connid[1] ) { | 115 | if (inpacket[0] != connid[0] || inpacket[1] != connid[1]) { |
113 | const size_t s = sizeof( "Connection ID missmatch." ); | 116 | const size_t s = sizeof("Connection ID missmatch."); |
114 | outpacket[0] = htonl( 3 ); outpacket[1] = inpacket[3]; | 117 | outpacket[0] = htonl(3); |
115 | memcpy( &outpacket[2], "Connection ID missmatch.", s ); | 118 | outpacket[1] = inpacket[3]; |
116 | socket_send6( serversocket, ws->outbuf, 8 + s, remoteip, remoteport, 0 ); | 119 | memcpy(&outpacket[2], "Connection ID missmatch.", s); |
117 | stats_issue_event( EVENT_CONNID_MISSMATCH, FLAG_UDP, 8 + s ); | 120 | socket_send6(serversocket, ws->outbuf, 8 + s, remoteip, remoteport, 0); |
121 | stats_issue_event(EVENT_CONNID_MISSMATCH, FLAG_UDP, 8 + s); | ||
118 | return 1; | 122 | return 1; |
119 | } | 123 | } |
120 | } | 124 | } |
121 | 125 | ||
122 | switch( action ) { | 126 | switch (action) { |
123 | case 0: /* This is a connect action */ | 127 | case 0: /* This is a connect action */ |
124 | /* look for udp bittorrent magic id */ | 128 | /* look for udp bittorrent magic id */ |
125 | if( (ntohl(inpacket[0]) != 0x00000417) || (ntohl(inpacket[1]) != 0x27101980) ) | 129 | if ((ntohl(inpacket[0]) != 0x00000417) || (ntohl(inpacket[1]) != 0x27101980)) |
126 | return 1; | 130 | return 1; |
131 | |||
132 | outpacket[0] = 0; | ||
133 | outpacket[1] = inpacket[3]; | ||
134 | outpacket[2] = connid[0]; | ||
135 | outpacket[3] = connid[1]; | ||
136 | |||
137 | socket_send6(serversocket, ws->outbuf, 16, remoteip, remoteport, 0); | ||
138 | stats_issue_event(EVENT_CONNECT, FLAG_UDP, 16); | ||
139 | break; | ||
140 | case 1: /* This is an announce action */ | ||
141 | /* Minimum udp announce packet size */ | ||
142 | if (byte_count < 98) | ||
143 | return 1; | ||
144 | |||
145 | /* We do only want to know, if it is zero */ | ||
146 | left = inpacket[64 / 4] | inpacket[68 / 4]; | ||
127 | 147 | ||
128 | outpacket[0] = 0; | 148 | event = ntohl(inpacket[80 / 4]); |
129 | outpacket[1] = inpacket[3]; | 149 | port = *(uint16_t *)(((char *)inpacket) + 96); |
130 | outpacket[2] = connid[0]; | 150 | ws->hash = (ot_hash *)(((char *)inpacket) + 16); |
131 | outpacket[3] = connid[1]; | ||
132 | 151 | ||
133 | socket_send6( serversocket, ws->outbuf, 16, remoteip, remoteport, 0 ); | 152 | OT_SETIP(ws->peer, remoteip); |
134 | stats_issue_event( EVENT_CONNECT, FLAG_UDP, 16 ); | 153 | OT_SETPORT(ws->peer, &port); |
154 | OT_PEERFLAG(ws->peer) = 0; | ||
155 | |||
156 | switch (event) { | ||
157 | case 1: | ||
158 | OT_PEERFLAG(ws->peer) |= PEER_FLAG_COMPLETED; | ||
159 | break; | ||
160 | case 3: | ||
161 | OT_PEERFLAG(ws->peer) |= PEER_FLAG_STOPPED; | ||
135 | break; | 162 | break; |
136 | case 1: /* This is an announce action */ | 163 | default: |
137 | /* Minimum udp announce packet size */ | ||
138 | if( byte_count < 98 ) | ||
139 | return 1; | ||
140 | |||
141 | /* We do only want to know, if it is zero */ | ||
142 | left = inpacket[64/4] | inpacket[68/4]; | ||
143 | |||
144 | /* Limit amount of peers to 200 */ | ||
145 | numwant = ntohl( inpacket[92/4] ); | ||
146 | if (numwant > 200) numwant = 200; | ||
147 | |||
148 | event = ntohl( inpacket[80/4] ); | ||
149 | port = *(uint16_t*)( ((char*)inpacket) + 96 ); | ||
150 | ws->hash = (ot_hash*)( ((char*)inpacket) + 16 ); | ||
151 | |||
152 | OT_SETIP( &ws->peer, remoteip ); | ||
153 | OT_SETPORT( &ws->peer, &port ); | ||
154 | OT_PEERFLAG( &ws->peer ) = 0; | ||
155 | |||
156 | switch( event ) { | ||
157 | case 1: OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_COMPLETED; break; | ||
158 | case 3: OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_STOPPED; break; | ||
159 | default: break; | ||
160 | } | ||
161 | |||
162 | if( !left ) | ||
163 | OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_SEEDING; | ||
164 | |||
165 | outpacket[0] = htonl( 1 ); /* announce action */ | ||
166 | outpacket[1] = inpacket[12/4]; | ||
167 | |||
168 | if( OT_PEERFLAG( &ws->peer ) & PEER_FLAG_STOPPED ) { /* Peer is gone. */ | ||
169 | ws->reply = ws->outbuf; | ||
170 | ws->reply_size = remove_peer_from_torrent( FLAG_UDP, ws ); | ||
171 | } else { | ||
172 | ws->reply = ws->outbuf + 8; | ||
173 | ws->reply_size = 8 + add_peer_to_torrent_and_return_peers( FLAG_UDP, ws, numwant ); | ||
174 | } | ||
175 | |||
176 | socket_send6( serversocket, ws->outbuf, ws->reply_size, remoteip, remoteport, 0 ); | ||
177 | stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, ws->reply_size ); | ||
178 | break; | 164 | break; |
165 | } | ||
179 | 166 | ||
180 | case 2: /* This is a scrape action */ | 167 | if (!left) |
181 | outpacket[0] = htonl( 2 ); /* scrape action */ | 168 | OT_PEERFLAG(ws->peer) |= PEER_FLAG_SEEDING; |
182 | outpacket[1] = inpacket[12/4]; | ||
183 | 169 | ||
184 | for( scrape_count = 0; ( scrape_count * 20 < byte_count - 16) && ( scrape_count <= 74 ); scrape_count++ ) | 170 | outpacket[0] = htonl(1); /* announce action */ |
185 | return_udp_scrape_for_torrent( *(ot_hash*)( ((char*)inpacket) + 16 + 20 * scrape_count ), ((char*)outpacket) + 8 + 12 * scrape_count ); | 171 | outpacket[1] = inpacket[12 / 4]; |
186 | 172 | ||
187 | socket_send6( serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0 ); | 173 | if (OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED) { /* Peer is gone. */ |
188 | stats_issue_event( EVENT_SCRAPE, FLAG_UDP, scrape_count ); | 174 | ws->reply = ws->outbuf; |
189 | break; | 175 | ws->reply_size = remove_peer_from_torrent(FLAG_UDP, ws); |
176 | } else { | ||
177 | /* Limit amount of peers to OT_MAX_PEERS_UDP */ | ||
178 | uint32_t numwant = ntohl(inpacket[92 / 4]); | ||
179 | size_t max_peers = ip6_isv4mapped(remoteip) ? OT_MAX_PEERS_UDP4 : OT_MAX_PEERS_UDP6; | ||
180 | if (numwant > max_peers) | ||
181 | numwant = max_peers; | ||
182 | |||
183 | ws->reply = ws->outbuf + 8; | ||
184 | ws->reply_size = 8 + add_peer_to_torrent_and_return_peers(FLAG_UDP, ws, numwant); | ||
185 | } | ||
186 | |||
187 | socket_send6(serversocket, ws->outbuf, ws->reply_size, remoteip, remoteport, 0); | ||
188 | stats_issue_event(EVENT_ANNOUNCE, FLAG_UDP, ws->reply_size); | ||
189 | break; | ||
190 | |||
191 | case 2: /* This is a scrape action */ | ||
192 | outpacket[0] = htonl(2); /* scrape action */ | ||
193 | outpacket[1] = inpacket[12 / 4]; | ||
194 | |||
195 | for (scrape_count = 0; (scrape_count * 20 < byte_count - 16) && (scrape_count <= 74); scrape_count++) | ||
196 | return_udp_scrape_for_torrent(*(ot_hash *)(((char *)inpacket) + 16 + 20 * scrape_count), ((char *)outpacket) + 8 + 12 * scrape_count); | ||
197 | |||
198 | socket_send6(serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0); | ||
199 | stats_issue_event(EVENT_SCRAPE, FLAG_UDP, scrape_count); | ||
200 | break; | ||
190 | } | 201 | } |
191 | return 1; | 202 | return 1; |
192 | } | 203 | } |
193 | 204 | ||
194 | static void* udp_worker( void * args ) { | 205 | static void *udp_worker(void *args) { |
195 | int64 sock = (int64)args; | 206 | int64 sock = (int64)args; |
196 | struct ot_workstruct ws; | 207 | struct ot_workstruct ws; |
197 | memset( &ws, 0, sizeof(ws) ); | 208 | memset(&ws, 0, sizeof(ws)); |
198 | 209 | ||
199 | ws.inbuf=malloc(G_INBUF_SIZE); | 210 | ws.inbuf = malloc(G_INBUF_SIZE); |
200 | ws.outbuf=malloc(G_OUTBUF_SIZE); | 211 | ws.outbuf = malloc(G_OUTBUF_SIZE); |
201 | #ifdef _DEBUG_HTTPERROR | 212 | #ifdef _DEBUG_HTTPERROR |
202 | ws.debugbuf=malloc(G_DEBUGBUF_SIZE); | 213 | ws.debugbuf = malloc(G_DEBUGBUF_SIZE); |
203 | #endif | 214 | #endif |
204 | 215 | ||
205 | while( g_opentracker_running ) | 216 | while (g_opentracker_running) |
206 | handle_udp6( sock, &ws ); | 217 | handle_udp6(sock, &ws); |
207 | 218 | ||
208 | free( ws.inbuf ); | 219 | free(ws.inbuf); |
209 | free( ws.outbuf ); | 220 | free(ws.outbuf); |
210 | #ifdef _DEBUG_HTTPERROR | 221 | #ifdef _DEBUG_HTTPERROR |
211 | free( ws.debugbuf ); | 222 | free(ws.debugbuf); |
212 | #endif | 223 | #endif |
213 | return NULL; | 224 | return NULL; |
214 | } | 225 | } |
215 | 226 | ||
216 | void udp_init( int64 sock, unsigned int worker_count ) { | 227 | void udp_init(int64 sock, unsigned int worker_count) { |
217 | pthread_t thread_id; | 228 | pthread_t thread_id; |
218 | if( !g_rijndael_round_key[0] ) | 229 | if (!g_rijndael_round_key[0]) |
219 | udp_generate_rijndael_round_key(); | 230 | udp_generate_rijndael_round_key(); |
220 | #ifdef _DEBUG | 231 | #ifdef _DEBUG |
221 | fprintf( stderr, " installing %d workers on udp socket %ld\n", worker_count, (unsigned long)sock ); | 232 | fprintf(stderr, " installing %d workers on udp socket %ld\n", worker_count, (unsigned long)sock); |
222 | #endif | 233 | #endif |
223 | while( worker_count-- ) | 234 | while (worker_count--) |
224 | pthread_create( &thread_id, NULL, udp_worker, (void *)sock ); | 235 | pthread_create(&thread_id, NULL, udp_worker, (void *)sock); |
225 | } | 236 | } |
226 | |||
227 | const char *g_version_udp_c = "$Source$: $Revision$\n"; | ||
@@ -6,7 +6,7 @@ | |||
6 | #ifndef OT_UDP_H__ | 6 | #ifndef OT_UDP_H__ |
7 | #define OT_UDP_H__ | 7 | #define OT_UDP_H__ |
8 | 8 | ||
9 | void udp_init( int64 sock, unsigned int worker_count ); | 9 | void udp_init(int64 sock, unsigned int worker_count); |
10 | int handle_udp6( int64 serversocket, struct ot_workstruct *ws ); | 10 | int handle_udp6(int64 serversocket, struct ot_workstruct *ws); |
11 | 11 | ||
12 | #endif | 12 | #endif |
diff --git a/ot_vector.c b/ot_vector.c index 2a632b2..2bc07b5 100644 --- a/ot_vector.c +++ b/ot_vector.c | |||
@@ -4,39 +4,37 @@ | |||
4 | $id$ */ | 4 | $id$ */ |
5 | 5 | ||
6 | /* System */ | 6 | /* System */ |
7 | #include <stddef.h> | ||
8 | #include <stdint.h> | ||
7 | #include <stdlib.h> | 9 | #include <stdlib.h> |
8 | #include <string.h> | 10 | #include <string.h> |
9 | #include <strings.h> | 11 | #include <strings.h> |
10 | #include <stdint.h> | ||
11 | 12 | ||
12 | /* Opentracker */ | 13 | /* Opentracker */ |
13 | #include "trackerlogic.h" | 14 | #include "trackerlogic.h" |
14 | #include "ot_vector.h" | ||
15 | 15 | ||
16 | /* Libowfat */ | 16 | /* Libowfat */ |
17 | #include "uint32.h" | ||
18 | #include "uint16.h" | 17 | #include "uint16.h" |
18 | #include "uint32.h" | ||
19 | 19 | ||
20 | static int vector_compare_peer(const void *peer1, const void *peer2 ) { | 20 | static int vector_compare_peer6(const void *peer1, const void *peer2) { return memcmp(peer1, peer2, OT_PEER_COMPARE_SIZE6); } |
21 | return memcmp( peer1, peer2, OT_PEER_COMPARE_SIZE ); | 21 | static int vector_compare_peer4(const void *peer1, const void *peer2) { return memcmp(peer1, peer2, OT_PEER_COMPARE_SIZE4); } |
22 | } | ||
23 | 22 | ||
24 | /* This function gives us a binary search that returns a pointer, even if | 23 | /* This function gives us a binary search that returns a pointer, even if |
25 | no exact match is found. In that case it sets exactmatch 0 and gives | 24 | no exact match is found. In that case it sets exactmatch 0 and gives |
26 | calling functions the chance to insert data | 25 | calling functions the chance to insert data |
27 | */ | 26 | */ |
28 | void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size, | 27 | void *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch) { |
29 | size_t compare_size, int *exactmatch ) { | ||
30 | size_t interval = member_count; | 28 | size_t interval = member_count; |
31 | 29 | ||
32 | while( interval ) { | 30 | while (interval) { |
33 | uint8_t *lookat = ((uint8_t*)base) + member_size * ( interval / 2 ); | 31 | uint8_t *lookat = ((uint8_t *)base) + member_size * (interval / 2); |
34 | int cmp = memcmp( lookat, key, compare_size ); | 32 | int cmp = memcmp(lookat, key, compare_size); |
35 | if(cmp == 0 ) { | 33 | if (cmp == 0) { |
36 | base = lookat; | 34 | base = lookat; |
37 | break; | 35 | break; |
38 | } | 36 | } |
39 | if(cmp < 0) { | 37 | if (cmp < 0) { |
40 | base = lookat + member_size; | 38 | base = lookat + member_size; |
41 | interval--; | 39 | interval--; |
42 | } | 40 | } |
@@ -44,13 +42,14 @@ void *binary_search( const void * const key, const void * base, const size_t mem | |||
44 | } | 42 | } |
45 | 43 | ||
46 | *exactmatch = interval; | 44 | *exactmatch = interval; |
47 | return (void*)base; | 45 | return (void *)base; |
48 | } | 46 | } |
49 | 47 | ||
50 | static uint8_t vector_hash_peer( ot_peer *peer, int bucket_count ) { | 48 | static uint8_t vector_hash_peer(ot_peer const *peer, size_t compare_size, int bucket_count) { |
51 | unsigned int hash = 5381, i = OT_PEER_COMPARE_SIZE; | 49 | unsigned int hash = 5381; |
52 | uint8_t *p = (uint8_t*)peer; | 50 | uint8_t *p = (uint8_t *)peer; |
53 | while( i-- ) hash += (hash<<5) + *(p++); | 51 | while (compare_size--) |
52 | hash += (hash << 5) + *(p++); | ||
54 | return hash % bucket_count; | 53 | return hash % bucket_count; |
55 | } | 54 | } |
56 | 55 | ||
@@ -61,48 +60,62 @@ static uint8_t vector_hash_peer( ot_peer *peer, int bucket_count ) { | |||
61 | if it wasn't found in vector. Caller needs to check the passed "exactmatch" variable to see, whether an insert | 60 | if it wasn't found in vector. Caller needs to check the passed "exactmatch" variable to see, whether an insert |
62 | took place. If resizing the vector failed, NULL is returned, else the pointer to the object in vector. | 61 | took place. If resizing the vector failed, NULL is returned, else the pointer to the object in vector. |
63 | */ | 62 | */ |
64 | void *vector_find_or_insert( ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch ) { | 63 | void *vector_find_or_insert(ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch) { |
65 | uint8_t *match = binary_search( key, vector->data, vector->size, member_size, compare_size, exactmatch ); | 64 | uint8_t *match = binary_search(key, vector->data, vector->size, member_size, compare_size, exactmatch); |
66 | 65 | ||
67 | if( *exactmatch ) return match; | 66 | if (*exactmatch) |
67 | return match; | ||
68 | 68 | ||
69 | if( vector->size + 1 > vector->space ) { | 69 | if (vector->size + 1 > vector->space) { |
70 | size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS; | 70 | size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS; |
71 | uint8_t *new_data = realloc( vector->data, new_space * member_size ); | 71 | uint8_t *new_data = realloc(vector->data, new_space * member_size); |
72 | if( !new_data ) return NULL; | 72 | if (!new_data) |
73 | return NULL; | ||
73 | /* Adjust pointer if it moved by realloc */ | 74 | /* Adjust pointer if it moved by realloc */ |
74 | match = new_data + (match - (uint8_t*)vector->data); | 75 | match = new_data + (match - (uint8_t *)vector->data); |
75 | 76 | ||
76 | vector->data = new_data; | 77 | vector->data = new_data; |
77 | vector->space = new_space; | 78 | vector->space = new_space; |
78 | } | 79 | } |
79 | memmove( match + member_size, match, ((uint8_t*)vector->data) + member_size * vector->size - match ); | 80 | memmove(match + member_size, match, ((uint8_t *)vector->data) + member_size * vector->size - match); |
80 | 81 | ||
81 | vector->size++; | 82 | vector->size++; |
82 | return match; | 83 | return match; |
83 | } | 84 | } |
84 | 85 | ||
85 | ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer *peer, int *exactmatch ) { | 86 | ot_peer *vector_find_or_insert_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch) { |
86 | ot_peer *match; | 87 | ot_peer *match, *end; |
88 | const size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); | ||
89 | size_t match_to_end; | ||
87 | 90 | ||
88 | /* If space is zero but size is set, we're dealing with a list of vector->size buckets */ | 91 | /* If space is zero but size is set, we're dealing with a list of vector->size buckets */ |
89 | if( vector->space < vector->size ) | 92 | if (vector->space < vector->size) |
90 | vector = ((ot_vector*)vector->data) + vector_hash_peer(peer, vector->size ); | 93 | vector = ((ot_vector *)vector->data) + vector_hash_peer(peer, compare_size, vector->size); |
91 | match = (ot_peer*)binary_search( peer, vector->data, vector->size, sizeof(ot_peer), OT_PEER_COMPARE_SIZE, exactmatch ); | 94 | match = binary_search(peer, vector->data, vector->size, peer_size, compare_size, exactmatch); |
92 | 95 | ||
93 | if( *exactmatch ) return match; | 96 | if (*exactmatch) |
97 | return match; | ||
94 | 98 | ||
95 | if( vector->size + 1 > vector->space ) { | 99 | /* This is the amount of bytes that needs to be pushed backwards by peer_size bytes to make room for new peer */ |
96 | size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS; | 100 | end = (ot_peer *)vector->data + vector->size * peer_size; |
97 | ot_peer *new_data = realloc( vector->data, new_space * sizeof(ot_peer) ); | 101 | match_to_end = end - match; |
98 | if( !new_data ) return NULL; | 102 | |
103 | if (vector->size + 1 > vector->space) { | ||
104 | ptrdiff_t offset = match - (ot_peer *)vector->data; | ||
105 | size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS; | ||
106 | ot_peer *new_data = realloc(vector->data, new_space * peer_size); | ||
107 | |||
108 | if (!new_data) | ||
109 | return NULL; | ||
99 | /* Adjust pointer if it moved by realloc */ | 110 | /* Adjust pointer if it moved by realloc */ |
100 | match = new_data + (match - (ot_peer*)vector->data); | 111 | match = new_data + offset; |
101 | 112 | ||
102 | vector->data = new_data; | 113 | vector->data = new_data; |
103 | vector->space = new_space; | 114 | vector->space = new_space; |
104 | } | 115 | } |
105 | memmove( match + 1, match, sizeof(ot_peer) * ( ((ot_peer*)vector->data) + vector->size - match ) ); | 116 | |
117 | /* Here we're guaranteed to have enough space in vector to move the block of peers after insertion point */ | ||
118 | memmove(match + peer_size, match, match_to_end); | ||
106 | 119 | ||
107 | vector->size++; | 120 | vector->size++; |
108 | return match; | 121 | return match; |
@@ -113,126 +126,134 @@ ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer *peer, int *exac | |||
113 | 1 if a non-seeding peer was removed | 126 | 1 if a non-seeding peer was removed |
114 | 2 if a seeding peer was removed | 127 | 2 if a seeding peer was removed |
115 | */ | 128 | */ |
116 | int vector_remove_peer( ot_vector *vector, ot_peer *peer ) { | 129 | int vector_remove_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size) { |
117 | int exactmatch; | 130 | int exactmatch, was_seeder; |
118 | ot_peer *match, *end; | 131 | ot_peer *match, *end; |
132 | size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); | ||
119 | 133 | ||
120 | if( !vector->size ) return 0; | 134 | if (!vector->size) |
135 | return 0; | ||
121 | 136 | ||
122 | /* If space is zero but size is set, we're dealing with a list of vector->size buckets */ | 137 | /* If space is zero but size is set, we're dealing with a list of vector->size buckets */ |
123 | if( vector->space < vector->size ) | 138 | if (vector->space < vector->size) |
124 | vector = ((ot_vector*)vector->data) + vector_hash_peer(peer, vector->size ); | 139 | vector = ((ot_vector *)vector->data) + vector_hash_peer(peer, compare_size, vector->size); |
125 | 140 | ||
126 | end = ((ot_peer*)vector->data) + vector->size; | 141 | end = ((ot_peer *)vector->data) + peer_size * vector->size; |
127 | match = (ot_peer*)binary_search( peer, vector->data, vector->size, sizeof(ot_peer), OT_PEER_COMPARE_SIZE, &exactmatch ); | 142 | match = (ot_peer *)binary_search(peer, vector->data, vector->size, peer_size, compare_size, &exactmatch); |
128 | if( !exactmatch ) return 0; | 143 | if (!exactmatch) |
144 | return 0; | ||
129 | 145 | ||
130 | exactmatch = ( OT_PEERFLAG( match ) & PEER_FLAG_SEEDING ) ? 2 : 1; | 146 | was_seeder = (OT_PEERFLAG_D(match, peer_size) & PEER_FLAG_SEEDING) ? 2 : 1; |
131 | memmove( match, match + 1, sizeof(ot_peer) * ( end - match - 1 ) ); | 147 | memmove(match, match + peer_size, end - match - peer_size); |
132 | 148 | ||
133 | vector->size--; | 149 | vector->size--; |
134 | vector_fixup_peers( vector ); | 150 | vector_fixup_peers(vector, peer_size); |
135 | return exactmatch; | 151 | return was_seeder; |
136 | } | 152 | } |
137 | 153 | ||
138 | void vector_remove_torrent( ot_vector *vector, ot_torrent *match ) { | 154 | void vector_remove_torrent(ot_vector *vector, ot_torrent *match) { |
139 | ot_torrent *end = ((ot_torrent*)vector->data) + vector->size; | 155 | ot_torrent *end = ((ot_torrent *)vector->data) + vector->size; |
140 | 156 | ||
141 | if( !vector->size ) return; | 157 | if (!vector->size) |
158 | return; | ||
142 | 159 | ||
143 | /* If this is being called after a unsuccessful malloc() for peer_list | 160 | /* If this is being called after a unsuccessful malloc() for peer_list |
144 | in add_peer_to_torrent, match->peer_list actually might be NULL */ | 161 | in add_peer_to_torrent, match->peer_list actually might be NULL */ |
145 | if( match->peer_list) free_peerlist( match->peer_list ); | 162 | free_peerlist(match->peer_list6); |
163 | free_peerlist(match->peer_list4); | ||
146 | 164 | ||
147 | memmove( match, match + 1, sizeof(ot_torrent) * ( end - match - 1 ) ); | 165 | memmove(match, match + 1, sizeof(ot_torrent) * (end - match - 1)); |
148 | if( ( --vector->size * OT_VECTOR_SHRINK_THRESH < vector->space ) && ( vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS ) ) { | 166 | if ((--vector->size * OT_VECTOR_SHRINK_THRESH < vector->space) && (vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS)) { |
149 | vector->space /= OT_VECTOR_SHRINK_RATIO; | 167 | vector->space /= OT_VECTOR_SHRINK_RATIO; |
150 | vector->data = realloc( vector->data, vector->space * sizeof( ot_torrent ) ); | 168 | vector->data = realloc(vector->data, vector->space * sizeof(ot_torrent)); |
151 | } | 169 | } |
152 | } | 170 | } |
153 | 171 | ||
154 | void vector_clean_list( ot_vector * vector, int num_buckets ) { | 172 | void vector_clean_list(ot_vector *vector, int num_buckets) { |
155 | while( num_buckets-- ) | 173 | while (num_buckets--) |
156 | free( vector[num_buckets].data ); | 174 | free(vector[num_buckets].data); |
157 | free( vector ); | 175 | free(vector); |
158 | return; | 176 | return; |
159 | } | 177 | } |
160 | 178 | ||
161 | void vector_redistribute_buckets( ot_peerlist * peer_list ) { | 179 | void vector_redistribute_buckets(ot_peerlist *peer_list, size_t peer_size) { |
162 | int tmp, bucket, bucket_size_new, num_buckets_new, num_buckets_old = 1; | 180 | int tmp, bucket, bucket_size_new, num_buckets_new, num_buckets_old = 1; |
163 | ot_vector * bucket_list_new, * bucket_list_old = &peer_list->peers; | 181 | ot_vector *bucket_list_new, *bucket_list_old = &peer_list->peers; |
182 | int (*sort_func)(const void *, const void *) = peer_size == OT_PEER_SIZE6 ? &vector_compare_peer6 : &vector_compare_peer4; | ||
164 | 183 | ||
165 | if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { | 184 | if (OT_PEERLIST_HASBUCKETS(peer_list)) { |
166 | num_buckets_old = peer_list->peers.size; | 185 | num_buckets_old = peer_list->peers.size; |
167 | bucket_list_old = peer_list->peers.data; | 186 | bucket_list_old = peer_list->peers.data; |
168 | } | 187 | } |
169 | 188 | ||
170 | if( peer_list->peer_count < 255 ) | 189 | if (peer_list->peer_count < 255) |
171 | num_buckets_new = 1; | 190 | num_buckets_new = 1; |
172 | else if( peer_list->peer_count > 8192 ) | 191 | else if (peer_list->peer_count > 8192) |
173 | num_buckets_new = 64; | 192 | num_buckets_new = 64; |
174 | else if( peer_list->peer_count >= 512 && peer_list->peer_count < 4096 ) | 193 | else if (peer_list->peer_count >= 512 && peer_list->peer_count < 4096) |
175 | num_buckets_new = 16; | 194 | num_buckets_new = 16; |
176 | else if( peer_list->peer_count < 512 && num_buckets_old <= 16 ) | 195 | else if (peer_list->peer_count < 512 && num_buckets_old <= 16) |
177 | num_buckets_new = num_buckets_old; | 196 | num_buckets_new = num_buckets_old; |
178 | else if( peer_list->peer_count < 512 ) | 197 | else if (peer_list->peer_count < 512) |
179 | num_buckets_new = 1; | 198 | num_buckets_new = 1; |
180 | else if( peer_list->peer_count < 8192 && num_buckets_old > 1 ) | 199 | else if (peer_list->peer_count < 8192 && num_buckets_old > 1) |
181 | num_buckets_new = num_buckets_old; | 200 | num_buckets_new = num_buckets_old; |
182 | else | 201 | else |
183 | num_buckets_new = 16; | 202 | num_buckets_new = 16; |
184 | 203 | ||
185 | if( num_buckets_new == num_buckets_old ) | 204 | if (num_buckets_new == num_buckets_old) |
186 | return; | 205 | return; |
187 | 206 | ||
188 | /* Assume near perfect distribution */ | 207 | /* Assume near perfect distribution */ |
189 | bucket_list_new = malloc( num_buckets_new * sizeof( ot_vector ) ); | 208 | bucket_list_new = malloc(num_buckets_new * sizeof(ot_vector)); |
190 | if( !bucket_list_new) return; | 209 | if (!bucket_list_new) |
191 | bzero( bucket_list_new, num_buckets_new * sizeof( ot_vector ) ); | 210 | return; |
211 | bzero(bucket_list_new, num_buckets_new * sizeof(ot_vector)); | ||
192 | 212 | ||
193 | tmp = peer_list->peer_count / num_buckets_new; | 213 | tmp = peer_list->peer_count / num_buckets_new; |
194 | bucket_size_new = OT_VECTOR_MIN_MEMBERS; | 214 | bucket_size_new = OT_VECTOR_MIN_MEMBERS; |
195 | while( bucket_size_new < tmp) | 215 | while (bucket_size_new < tmp) |
196 | bucket_size_new *= OT_VECTOR_GROW_RATIO; | 216 | bucket_size_new *= OT_VECTOR_GROW_RATIO; |
197 | 217 | ||
198 | /* preallocate vectors to hold all peers */ | 218 | /* preallocate vectors to hold all peers */ |
199 | for( bucket=0; bucket<num_buckets_new; ++bucket ) { | 219 | for (bucket = 0; bucket < num_buckets_new; ++bucket) { |
200 | bucket_list_new[bucket].space = bucket_size_new; | 220 | bucket_list_new[bucket].space = bucket_size_new; |
201 | bucket_list_new[bucket].data = malloc( bucket_size_new * sizeof(ot_peer) ); | 221 | bucket_list_new[bucket].data = malloc(bucket_size_new * peer_size); |
202 | if( !bucket_list_new[bucket].data ) | 222 | if (!bucket_list_new[bucket].data) |
203 | return vector_clean_list( bucket_list_new, num_buckets_new ); | 223 | return vector_clean_list(bucket_list_new, num_buckets_new); |
204 | } | 224 | } |
205 | 225 | ||
206 | /* Now sort them into the correct bucket */ | 226 | /* Now sort them into the correct bucket */ |
207 | for( bucket=0; bucket<num_buckets_old; ++bucket ) { | 227 | for (bucket = 0; bucket < num_buckets_old; ++bucket) { |
208 | ot_peer * peers_old = bucket_list_old[bucket].data, * peers_new; | 228 | ot_peer *peers_old = bucket_list_old[bucket].data; |
209 | int peer_count_old = bucket_list_old[bucket].size; | 229 | int peer_count_old = bucket_list_old[bucket].size; |
210 | while( peer_count_old-- ) { | 230 | while (peer_count_old--) { |
211 | ot_vector * bucket_dest = bucket_list_new; | 231 | ot_vector *bucket_dest = bucket_list_new; |
212 | if( num_buckets_new > 1 ) | 232 | if (num_buckets_new > 1) |
213 | bucket_dest += vector_hash_peer(peers_old, num_buckets_new); | 233 | bucket_dest += vector_hash_peer(peers_old, OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size), num_buckets_new); |
214 | if( bucket_dest->size + 1 > bucket_dest->space ) { | 234 | if (bucket_dest->size + 1 > bucket_dest->space) { |
215 | void * tmp = realloc( bucket_dest->data, sizeof(ot_peer) * OT_VECTOR_GROW_RATIO * bucket_dest->space ); | 235 | void *tmp = realloc(bucket_dest->data, peer_size * OT_VECTOR_GROW_RATIO * bucket_dest->space); |
216 | if( !tmp ) return vector_clean_list( bucket_list_new, num_buckets_new ); | 236 | if (!tmp) |
237 | return vector_clean_list(bucket_list_new, num_buckets_new); | ||
217 | bucket_dest->data = tmp; | 238 | bucket_dest->data = tmp; |
218 | bucket_dest->space *= OT_VECTOR_GROW_RATIO; | 239 | bucket_dest->space *= OT_VECTOR_GROW_RATIO; |
219 | } | 240 | } |
220 | peers_new = (ot_peer*)bucket_dest->data; | 241 | memcpy((ot_peer *)bucket_dest->data + peer_size * bucket_dest->size++, peers_old, peer_size); |
221 | memcpy(peers_new + bucket_dest->size++, peers_old++, sizeof(ot_peer)); | 242 | peers_old += peer_size; |
222 | } | 243 | } |
223 | } | 244 | } |
224 | 245 | ||
225 | /* Now sort each bucket to later allow bsearch */ | 246 | /* Now sort each bucket to later allow bsearch */ |
226 | for( bucket=0; bucket<num_buckets_new; ++bucket ) | 247 | for (bucket = 0; bucket < num_buckets_new; ++bucket) |
227 | qsort( bucket_list_new[bucket].data, bucket_list_new[bucket].size, sizeof( ot_peer ), vector_compare_peer ); | 248 | qsort(bucket_list_new[bucket].data, bucket_list_new[bucket].size, peer_size, sort_func); |
228 | 249 | ||
229 | /* Everything worked fine. Now link new bucket_list to peer_list */ | 250 | /* Everything worked fine. Now link new bucket_list to peer_list */ |
230 | if( OT_PEERLIST_HASBUCKETS( peer_list) ) | 251 | if (OT_PEERLIST_HASBUCKETS(peer_list)) |
231 | vector_clean_list( (ot_vector*)peer_list->peers.data, peer_list->peers.size ); | 252 | vector_clean_list((ot_vector *)peer_list->peers.data, peer_list->peers.size); |
232 | else | 253 | else |
233 | free( peer_list->peers.data ); | 254 | free(peer_list->peers.data); |
234 | 255 | ||
235 | if( num_buckets_new > 1 ) { | 256 | if (num_buckets_new > 1) { |
236 | peer_list->peers.data = bucket_list_new; | 257 | peer_list->peers.data = bucket_list_new; |
237 | peer_list->peers.size = num_buckets_new; | 258 | peer_list->peers.size = num_buckets_new; |
238 | peer_list->peers.space = 0; /* Magic marker for "is list of buckets" */ | 259 | peer_list->peers.space = 0; /* Magic marker for "is list of buckets" */ |
@@ -240,27 +261,24 @@ void vector_redistribute_buckets( ot_peerlist * peer_list ) { | |||
240 | peer_list->peers.data = bucket_list_new->data; | 261 | peer_list->peers.data = bucket_list_new->data; |
241 | peer_list->peers.size = bucket_list_new->size; | 262 | peer_list->peers.size = bucket_list_new->size; |
242 | peer_list->peers.space = bucket_list_new->space; | 263 | peer_list->peers.space = bucket_list_new->space; |
243 | free( bucket_list_new ); | 264 | free(bucket_list_new); |
244 | } | 265 | } |
245 | } | 266 | } |
246 | 267 | ||
247 | void vector_fixup_peers( ot_vector * vector ) { | 268 | void vector_fixup_peers(ot_vector *vector, size_t peer_size) { |
248 | int need_fix = 0; | 269 | int need_fix = 0; |
249 | 270 | ||
250 | if( !vector->size ) { | 271 | if (!vector->size) { |
251 | free( vector->data ); | 272 | free(vector->data); |
252 | vector->data = NULL; | 273 | vector->data = NULL; |
253 | vector->space = 0; | 274 | vector->space = 0; |
254 | return; | 275 | return; |
255 | } | 276 | } |
256 | 277 | ||
257 | while( ( vector->size * OT_VECTOR_SHRINK_THRESH < vector->space ) && | 278 | while ((vector->size * OT_VECTOR_SHRINK_THRESH < vector->space) && (vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS)) { |
258 | ( vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS ) ) { | ||
259 | vector->space /= OT_VECTOR_SHRINK_RATIO; | 279 | vector->space /= OT_VECTOR_SHRINK_RATIO; |
260 | need_fix++; | 280 | need_fix++; |
261 | } | 281 | } |
262 | if( need_fix ) | 282 | if (need_fix) |
263 | vector->data = realloc( vector->data, vector->space * sizeof( ot_peer ) ); | 283 | vector->data = realloc(vector->data, vector->space * peer_size); |
264 | } | 284 | } |
265 | |||
266 | const char *g_version_vector_c = "$Source$: $Revision$\n"; | ||
diff --git a/ot_vector.h b/ot_vector.h index f7f87aa..8d41452 100644 --- a/ot_vector.h +++ b/ot_vector.h | |||
@@ -16,19 +16,21 @@ | |||
16 | #define OT_PEER_BUCKET_MAXCOUNT 256 | 16 | #define OT_PEER_BUCKET_MAXCOUNT 256 |
17 | 17 | ||
18 | typedef struct { | 18 | typedef struct { |
19 | void *data; | 19 | void *data; |
20 | size_t size; | 20 | size_t size; |
21 | size_t space; | 21 | size_t space; |
22 | } ot_vector; | 22 | } ot_vector; |
23 | 23 | ||
24 | void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size, | 24 | void *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch); |
25 | size_t compare_size, int *exactmatch ); | 25 | void *vector_find_or_insert(ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch); |
26 | void *vector_find_or_insert( ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch ); | 26 | ot_peer *vector_find_or_insert_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch); |
27 | ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer *peer, int *exactmatch ); | ||
28 | 27 | ||
29 | int vector_remove_peer( ot_vector *vector, ot_peer *peer ); | 28 | int vector_remove_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size); |
30 | void vector_remove_torrent( ot_vector *vector, ot_torrent *match ); | 29 | void vector_remove_torrent(ot_vector *vector, ot_torrent *match); |
31 | void vector_redistribute_buckets( ot_peerlist * peer_list ); | 30 | |
32 | void vector_fixup_peers( ot_vector * vector ); | 31 | /* For ot_clean.c */ |
32 | void vector_redistribute_buckets(ot_peerlist *peer_list, size_t peer_size); | ||
33 | void vector_fixup_peers(ot_vector *vector, size_t peer_size); | ||
34 | void vector_clean_list(ot_vector *vector, int num_buckets); | ||
33 | 35 | ||
34 | #endif | 36 | #endif |
@@ -4,33 +4,33 @@ | |||
4 | $Id$ */ | 4 | $Id$ */ |
5 | 5 | ||
6 | /* System */ | 6 | /* System */ |
7 | #include <arpa/inet.h> | ||
8 | #include <ctype.h> | ||
9 | #include <errno.h> | ||
10 | #include <pthread.h> | ||
11 | #include <pwd.h> | ||
12 | #include <signal.h> | ||
7 | #include <stdint.h> | 13 | #include <stdint.h> |
14 | #include <stdio.h> | ||
8 | #include <stdlib.h> | 15 | #include <stdlib.h> |
9 | #include <string.h> | 16 | #include <string.h> |
10 | #include <arpa/inet.h> | ||
11 | #include <sys/socket.h> | 17 | #include <sys/socket.h> |
12 | #include <unistd.h> | 18 | #include <unistd.h> |
13 | #include <errno.h> | ||
14 | #include <signal.h> | ||
15 | #include <stdio.h> | ||
16 | #include <pwd.h> | ||
17 | #include <ctype.h> | ||
18 | #include <pthread.h> | ||
19 | 19 | ||
20 | /* Libowfat */ | 20 | /* Libowfat */ |
21 | #include "socket.h" | 21 | #include "byte.h" |
22 | #include "io.h" | 22 | #include "io.h" |
23 | #include "iob.h" | 23 | #include "iob.h" |
24 | #include "byte.h" | ||
25 | #include "scan.h" | ||
26 | #include "ip6.h" | 24 | #include "ip6.h" |
27 | #include "ndelay.h" | 25 | #include "ndelay.h" |
26 | #include "scan.h" | ||
27 | #include "socket.h" | ||
28 | 28 | ||
29 | /* Opentracker */ | 29 | /* Opentracker */ |
30 | #include "trackerlogic.h" | ||
31 | #include "ot_vector.h" | ||
32 | #include "ot_mutex.h" | 30 | #include "ot_mutex.h" |
33 | #include "ot_stats.h" | 31 | #include "ot_stats.h" |
32 | #include "ot_vector.h" | ||
33 | #include "trackerlogic.h" | ||
34 | 34 | ||
35 | #ifndef WANT_SYNC_LIVE | 35 | #ifndef WANT_SYNC_LIVE |
36 | #define WANT_SYNC_LIVE | 36 | #define WANT_SYNC_LIVE |
@@ -40,28 +40,28 @@ | |||
40 | ot_ip6 g_serverip; | 40 | ot_ip6 g_serverip; |
41 | uint16_t g_serverport = 9009; | 41 | uint16_t g_serverport = 9009; |
42 | uint32_t g_tracker_id; | 42 | uint32_t g_tracker_id; |
43 | char groupip_1[4] = { 224,0,23,5 }; | 43 | char groupip_1[4] = {224, 0, 23, 5}; |
44 | int g_self_pipe[2]; | 44 | int g_self_pipe[2]; |
45 | 45 | ||
46 | /* If you have more than 10 peers, don't use this proxy | 46 | /* If you have more than 10 peers, don't use this proxy |
47 | Use 20 slots for 10 peers to have room for 10 incoming connection slots | 47 | Use 20 slots for 10 peers to have room for 10 incoming connection slots |
48 | */ | 48 | */ |
49 | #define MAX_PEERS 20 | 49 | #define MAX_PEERS 20 |
50 | 50 | ||
51 | #define LIVESYNC_INCOMING_BUFFSIZE (256*256) | 51 | #define LIVESYNC_INCOMING_BUFFSIZE (256 * 256) |
52 | #define STREAMSYNC_OUTGOING_BUFFSIZE (256*256) | 52 | #define STREAMSYNC_OUTGOING_BUFFSIZE (256 * 256) |
53 | 53 | ||
54 | #define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480 | 54 | #define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480 |
55 | #define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer)+sizeof(ot_hash)) | 55 | #define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer) + sizeof(ot_hash)) |
56 | #define LIVESYNC_MAXDELAY 15 /* seconds */ | 56 | #define LIVESYNC_MAXDELAY 15 /* seconds */ |
57 | 57 | ||
58 | /* The amount of time a complete sync cycle should take */ | 58 | /* The amount of time a complete sync cycle should take */ |
59 | #define OT_SYNC_INTERVAL_MINUTES 2 | 59 | #define OT_SYNC_INTERVAL_MINUTES 2 |
60 | 60 | ||
61 | /* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */ | 61 | /* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */ |
62 | #define OT_SYNC_SLEEP ( ( ( OT_SYNC_INTERVAL_MINUTES ) * 60 * 1000000 ) / ( OT_BUCKET_COUNT ) ) | 62 | #define OT_SYNC_SLEEP (((OT_SYNC_INTERVAL_MINUTES) * 60 * 1000000) / (OT_BUCKET_COUNT)) |
63 | 63 | ||
64 | enum { OT_SYNC_PEER }; | 64 | enum { OT_SYNC_PEER4, OT_SYNC_PEER6 }; |
65 | enum { FLAG_SERVERSOCKET = 1 }; | 65 | enum { FLAG_SERVERSOCKET = 1 }; |
66 | 66 | ||
67 | /* For incoming packets */ | 67 | /* For incoming packets */ |
@@ -75,145 +75,153 @@ static uint8_t *g_peerbuffer_pos; | |||
75 | static uint8_t *g_peerbuffer_highwater = g_peerbuffer_start + LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS; | 75 | static uint8_t *g_peerbuffer_highwater = g_peerbuffer_start + LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS; |
76 | static ot_time g_next_packet_time; | 76 | static ot_time g_next_packet_time; |
77 | 77 | ||
78 | static void * livesync_worker( void * args ); | 78 | static void *livesync_worker(void *args); |
79 | static void * streamsync_worker( void * args ); | 79 | static void *streamsync_worker(void *args); |
80 | static void livesync_proxytell( uint8_t prefix, uint8_t *info_hash, uint8_t *peer ); | 80 | static void livesync_proxytell(uint8_t prefix, uint8_t *info_hash, uint8_t *peer); |
81 | 81 | ||
82 | void exerr( char * message ) { | 82 | void exerr(char *message) { |
83 | fprintf( stderr, "%s\n", message ); | 83 | fprintf(stderr, "%s\n", message); |
84 | exit( 111 ); | 84 | exit(111); |
85 | } | 85 | } |
86 | 86 | ||
87 | void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data ) { | 87 | void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data) { |
88 | (void) event; | 88 | (void)event; |
89 | (void) proto; | 89 | (void)proto; |
90 | (void) event_data; | 90 | (void)event_data; |
91 | } | 91 | } |
92 | 92 | ||
93 | void livesync_bind_mcast( ot_ip6 ip, uint16_t port) { | 93 | void livesync_bind_mcast(ot_ip6 ip, uint16_t port) { |
94 | char tmpip[4] = {0,0,0,0}; | 94 | char tmpip[4] = {0, 0, 0, 0}; |
95 | char *v4ip; | 95 | char *v4ip; |
96 | 96 | ||
97 | if( !ip6_isv4mapped(ip)) | 97 | if (!ip6_isv4mapped(ip)) |
98 | exerr("v6 mcast support not yet available."); | 98 | exerr("v6 mcast support not yet available."); |
99 | v4ip = ip+12; | 99 | v4ip = ip + 12; |
100 | 100 | ||
101 | if( g_socket_in != -1 ) | 101 | if (g_socket_in != -1) |
102 | exerr("Error: Livesync listen ip specified twice."); | 102 | exerr("Error: Livesync listen ip specified twice."); |
103 | 103 | ||
104 | if( ( g_socket_in = socket_udp4( )) < 0) | 104 | if ((g_socket_in = socket_udp4()) < 0) |
105 | exerr("Error: Cant create live sync incoming socket." ); | 105 | exerr("Error: Cant create live sync incoming socket."); |
106 | ndelay_off(g_socket_in); | 106 | ndelay_off(g_socket_in); |
107 | 107 | ||
108 | if( socket_bind4_reuse( g_socket_in, tmpip, port ) == -1 ) | 108 | if (socket_bind4_reuse(g_socket_in, tmpip, port) == -1) |
109 | exerr("Error: Cant bind live sync incoming socket." ); | 109 | exerr("Error: Cant bind live sync incoming socket."); |
110 | 110 | ||
111 | if( socket_mcjoin4( g_socket_in, groupip_1, v4ip ) ) | 111 | if (socket_mcjoin4(g_socket_in, groupip_1, v4ip)) |
112 | exerr("Error: Cant make live sync incoming socket join mcast group."); | 112 | exerr("Error: Cant make live sync incoming socket join mcast group."); |
113 | 113 | ||
114 | if( ( g_socket_out = socket_udp4()) < 0) | 114 | if ((g_socket_out = socket_udp4()) < 0) |
115 | exerr("Error: Cant create live sync outgoing socket." ); | 115 | exerr("Error: Cant create live sync outgoing socket."); |
116 | if( socket_bind4_reuse( g_socket_out, v4ip, port ) == -1 ) | 116 | if (socket_bind4_reuse(g_socket_out, v4ip, port) == -1) |
117 | exerr("Error: Cant bind live sync outgoing socket." ); | 117 | exerr("Error: Cant bind live sync outgoing socket."); |
118 | 118 | ||
119 | socket_mcttl4(g_socket_out, 1); | 119 | socket_mcttl4(g_socket_out, 1); |
120 | socket_mcloop4(g_socket_out, 1); | 120 | socket_mcloop4(g_socket_out, 1); |
121 | } | 121 | } |
122 | 122 | ||
123 | size_t add_peer_to_torrent_proxy( ot_hash hash, ot_peer *peer ) { | 123 | size_t add_peer_to_torrent_proxy(ot_hash hash, ot_peer *peer, size_t peer_size) { |
124 | int exactmatch; | 124 | int exactmatch; |
125 | ot_torrent *torrent; | 125 | ot_torrent *torrent; |
126 | ot_peer *peer_dest; | 126 | ot_peerlist *peer_list; |
127 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); | 127 | ot_peer *peer_dest; |
128 | 128 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash); | |
129 | torrent = vector_find_or_insert( torrents_list, (void*)hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 129 | size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); |
130 | if( !torrent ) | 130 | |
131 | torrent = vector_find_or_insert(torrents_list, (void *)hash, sizeof(ot_torrent), compare_size, &exactmatch); | ||
132 | if (!torrent) | ||
131 | return -1; | 133 | return -1; |
132 | 134 | ||
133 | if( !exactmatch ) { | 135 | if (!exactmatch) { |
134 | /* Create a new torrent entry, then */ | 136 | /* Create a new torrent entry, then */ |
135 | memcpy( torrent->hash, hash, sizeof(ot_hash) ); | 137 | memcpy(torrent->hash, hash, sizeof(ot_hash)); |
136 | 138 | ||
137 | if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) { | 139 | if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) { |
138 | vector_remove_torrent( torrents_list, torrent ); | 140 | vector_remove_torrent(torrents_list, torrent); |
139 | mutex_bucket_unlock_by_hash( hash, 0 ); | 141 | mutex_bucket_unlock_by_hash(hash, 0); |
140 | return -1; | 142 | return -1; |
141 | } | 143 | } |
142 | 144 | ||
143 | byte_zero( torrent->peer_list, sizeof( ot_peerlist ) ); | 145 | byte_zero(torrent->peer_list6, sizeof(ot_peerlist)); |
146 | byte_zero(torrent->peer_list4, sizeof(ot_peerlist)); | ||
144 | } | 147 | } |
145 | 148 | ||
149 | peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; | ||
150 | |||
146 | /* Check for peer in torrent */ | 151 | /* Check for peer in torrent */ |
147 | peer_dest = vector_find_or_insert_peer( &(torrent->peer_list->peers), peer, &exactmatch ); | 152 | peer_dest = vector_find_or_insert_peer(&(peer_list->peers), peer, peer_size, &exactmatch); |
148 | if( !peer_dest ) { | 153 | if (!peer_dest) { |
149 | mutex_bucket_unlock_by_hash( hash, 0 ); | 154 | mutex_bucket_unlock_by_hash(hash, 0); |
150 | return -1; | 155 | return -1; |
151 | } | 156 | } |
152 | /* Tell peer that it's fresh */ | 157 | /* Tell peer that it's fresh */ |
153 | OT_PEERTIME( peer ) = 0; | 158 | OT_PEERTIME(peer, peer_size) = 0; |
154 | 159 | ||
155 | /* If we hadn't had a match create peer there */ | 160 | /* If we hadn't had a match create peer there */ |
156 | if( !exactmatch ) { | 161 | if (!exactmatch) { |
157 | torrent->peer_list->peer_count++; | 162 | peer_list->peer_count++; |
158 | if( OT_PEERFLAG(peer) & PEER_FLAG_SEEDING ) | 163 | if (OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_SEEDING) |
159 | torrent->peer_list->seed_count++; | 164 | peer_list->seed_count++; |
160 | } | 165 | } |
161 | memcpy( peer_dest, peer, sizeof(ot_peer) ); | 166 | memcpy(peer_dest, peer, peer_size); |
162 | mutex_bucket_unlock_by_hash( hash, 0 ); | 167 | mutex_bucket_unlock_by_hash(hash, 0); |
163 | return 0; | 168 | return 0; |
164 | } | 169 | } |
165 | 170 | ||
166 | size_t remove_peer_from_torrent_proxy( ot_hash hash, ot_peer *peer ) { | 171 | size_t remove_peer_from_torrent_proxy(ot_hash hash, ot_peer *peer, size_t peer_size) { |
167 | int exactmatch; | 172 | int exactmatch; |
168 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); | 173 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash); |
169 | ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 174 | ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch); |
170 | 175 | ||
171 | if( exactmatch ) { | 176 | if (exactmatch) { |
172 | ot_peerlist *peer_list = torrent->peer_list; | 177 | ot_peerlist *peer_list = peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; |
173 | switch( vector_remove_peer( &peer_list->peers, peer ) ) { | 178 | switch (vector_remove_peer(&peer_list->peers, peer, peer_size)) { |
174 | case 2: peer_list->seed_count--; /* Intentional fallthrough */ | 179 | case 2: |
175 | case 1: peer_list->peer_count--; /* Intentional fallthrough */ | 180 | peer_list->seed_count--; /* Intentional fallthrough */ |
176 | default: break; | 181 | case 1: |
182 | peer_list->peer_count--; /* Intentional fallthrough */ | ||
183 | default: | ||
184 | break; | ||
177 | } | 185 | } |
178 | } | 186 | } |
179 | 187 | ||
180 | mutex_bucket_unlock_by_hash( hash, 0 ); | 188 | mutex_bucket_unlock_by_hash(hash, 0); |
181 | return 0; | 189 | return 0; |
182 | } | 190 | } |
183 | 191 | ||
184 | void free_peerlist( ot_peerlist *peer_list ) { | 192 | void free_peerlist(ot_peerlist *peer_list) { |
185 | if( peer_list->peers.data ) { | 193 | if (peer_list->peers.data) { |
186 | if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { | 194 | if (OT_PEERLIST_HASBUCKETS(peer_list)) { |
187 | ot_vector *bucket_list = (ot_vector*)(peer_list->peers.data); | 195 | ot_vector *bucket_list = (ot_vector *)(peer_list->peers.data); |
188 | 196 | ||
189 | while( peer_list->peers.size-- ) | 197 | while (peer_list->peers.size--) |
190 | free( bucket_list++->data ); | 198 | free(bucket_list++->data); |
191 | } | 199 | } |
192 | free( peer_list->peers.data ); | 200 | free(peer_list->peers.data); |
193 | } | 201 | } |
194 | free( peer_list ); | 202 | free(peer_list); |
195 | } | 203 | } |
196 | 204 | ||
197 | static void livesync_handle_peersync( ssize_t datalen ) { | 205 | static void livesync_handle_peersync(ssize_t datalen, size_t peer_size) { |
198 | int off = sizeof( g_tracker_id ) + sizeof( uint32_t ); | 206 | int off = sizeof(g_tracker_id) + sizeof(uint32_t); |
199 | 207 | ||
200 | fprintf( stderr, "." ); | 208 | fprintf(stderr, "."); |
201 | 209 | ||
202 | while( off + (ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ) <= datalen ) { | 210 | while ((ssize_t)(off + sizeof(ot_hash) + peer_size) <= datalen) { |
203 | ot_peer *peer = (ot_peer*)(g_inbuffer + off + sizeof(ot_hash)); | 211 | ot_peer *peer = (ot_peer *)(g_inbuffer + off + sizeof(ot_hash)); |
204 | ot_hash *hash = (ot_hash*)(g_inbuffer + off); | 212 | ot_hash *hash = (ot_hash *)(g_inbuffer + off); |
205 | 213 | ||
206 | if( OT_PEERFLAG(peer) & PEER_FLAG_STOPPED ) | 214 | if (OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_STOPPED) |
207 | remove_peer_from_torrent_proxy( *hash, peer ); | 215 | remove_peer_from_torrent_proxy(*hash, peer, peer_size); |
208 | else | 216 | else |
209 | add_peer_to_torrent_proxy( *hash, peer ); | 217 | add_peer_to_torrent_proxy(*hash, peer, peer_size); |
210 | 218 | ||
211 | off += sizeof( ot_hash ) + sizeof( ot_peer ); | 219 | off += sizeof(ot_hash) + peer_size; |
212 | } | 220 | } |
213 | } | 221 | } |
214 | 222 | ||
215 | int usage( char *self ) { | 223 | int usage(char *self) { |
216 | fprintf( stderr, "Usage: %s -L <livesync_iface_ip> -l <listenip>:<listenport> -c <connectip>:<connectport>\n", self ); | 224 | fprintf(stderr, "Usage: %s -L <livesync_iface_ip> -l <listenip>:<listenport> -c <connectip>:<connectport>\n", self); |
217 | return 0; | 225 | return 0; |
218 | } | 226 | } |
219 | 227 | ||
@@ -228,115 +236,115 @@ enum { | |||
228 | FLAG_MASK = 0x07 | 236 | FLAG_MASK = 0x07 |
229 | }; | 237 | }; |
230 | 238 | ||
231 | #define PROXYPEER_NEEDSCONNECT(flag) ((flag)==FLAG_OUTGOING) | 239 | #define PROXYPEER_NEEDSCONNECT(flag) ((flag) == FLAG_OUTGOING) |
232 | #define PROXYPEER_ISCONNECTED(flag) (((flag)&FLAG_MASK)==FLAG_CONNECTED) | 240 | #define PROXYPEER_ISCONNECTED(flag) (((flag) & FLAG_MASK) == FLAG_CONNECTED) |
233 | #define PROXYPEER_SETDISCONNECTED(flag) (flag)=(((flag)&FLAG_OUTGOING)|FLAG_DISCONNECTED) | 241 | #define PROXYPEER_SETDISCONNECTED(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_DISCONNECTED) |
234 | #define PROXYPEER_SETCONNECTING(flag) (flag)=(((flag)&FLAG_OUTGOING)|FLAG_CONNECTING) | 242 | #define PROXYPEER_SETCONNECTING(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_CONNECTING) |
235 | #define PROXYPEER_SETWAITTRACKERID(flag) (flag)=(((flag)&FLAG_OUTGOING)|FLAG_WAITTRACKERID) | 243 | #define PROXYPEER_SETWAITTRACKERID(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_WAITTRACKERID) |
236 | #define PROXYPEER_SETCONNECTED(flag) (flag)=(((flag)&FLAG_OUTGOING)|FLAG_CONNECTED) | 244 | #define PROXYPEER_SETCONNECTED(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_CONNECTED) |
237 | 245 | ||
238 | typedef struct { | 246 | typedef struct { |
239 | int state; /* Whether we want to connect, how far our handshake is, etc. */ | 247 | int state; /* Whether we want to connect, how far our handshake is, etc. */ |
240 | ot_ip6 ip; /* The peer to connect to */ | 248 | ot_ip6 ip; /* The peer to connect to */ |
241 | uint16_t port; /* The peers port */ | 249 | uint16_t port; /* The peers port */ |
242 | uint8_t indata[8192*16]; /* Any data not processed yet */ | 250 | uint8_t indata[8192 * 16]; /* Any data not processed yet */ |
243 | size_t indata_length; /* Length of unprocessed data */ | 251 | size_t indata_length; /* Length of unprocessed data */ |
244 | uint32_t tracker_id; /* How the other end greeted */ | 252 | uint32_t tracker_id; /* How the other end greeted */ |
245 | int64 fd; /* A file handle, if connected, <= 0 is disconnected (0 initially, -1 else) */ | 253 | int64 fd; /* A file handle, if connected, <= 0 is disconnected (0 initially, -1 else) */ |
246 | io_batch outdata; /* The iobatch containing our sync data */ | 254 | io_batch outdata; /* The iobatch containing our sync data */ |
247 | 255 | ||
248 | size_t packet_tcount; /* Number of unprocessed torrents in packet we currently receive */ | 256 | size_t packet_tcount; /* Number of unprocessed torrents in packet we currently receive */ |
249 | uint8_t packet_tprefix; /* Prefix byte for all torrents in current packet */ | 257 | uint8_t packet_tprefix; /* Prefix byte for all torrents in current packet */ |
250 | uint8_t packet_type; /* Type of current packet */ | 258 | uint8_t packet_type; /* Type of current packet */ |
251 | uint32_t packet_tid; /* Tracker id for current packet */ | 259 | uint32_t packet_tid; /* Tracker id for current packet */ |
252 | 260 | ||
253 | } proxy_peer; | 261 | } proxy_peer; |
254 | static void process_indata( proxy_peer * peer ); | 262 | static void process_indata(proxy_peer *peer); |
255 | 263 | ||
256 | void reset_info_block( proxy_peer * peer ) { | 264 | void reset_info_block(proxy_peer *peer) { |
257 | peer->indata_length = 0; | 265 | peer->indata_length = 0; |
258 | peer->tracker_id = 0; | 266 | peer->tracker_id = 0; |
259 | peer->fd = -1; | 267 | peer->fd = -1; |
260 | peer->packet_tcount = 0; | 268 | peer->packet_tcount = 0; |
261 | iob_reset( &peer->outdata ); | 269 | iob_reset(&peer->outdata); |
262 | PROXYPEER_SETDISCONNECTED( peer->state ); | 270 | PROXYPEER_SETDISCONNECTED(peer->state); |
263 | } | 271 | } |
264 | 272 | ||
265 | /* Number of connections to peers | 273 | /* Number of connections to peers |
266 | * If a peer's IP is set, we try to reconnect, when the connection drops | 274 | * If a peer's IP is set, we try to reconnect, when the connection drops |
267 | * If we already have a connected tracker_id in our records for an _incoming_ connection, drop it | 275 | * If we already have a connected tracker_id in our records for an _incoming_ connection, drop it |
268 | * Multiple connections to/from the same ip are okay, if tracker_id doesn't match | 276 | * Multiple connections to/from the same ip are okay, if tracker_id doesn't match |
269 | * Reconnect attempts occur only twice a minute | 277 | * Reconnect attempts occur only twice a minute |
270 | */ | 278 | */ |
271 | static int g_connection_count; | 279 | static int g_connection_count; |
272 | static ot_time g_connection_reconn; | 280 | static ot_time g_connection_reconn; |
273 | static proxy_peer g_connections[MAX_PEERS]; | 281 | static proxy_peer g_connections[MAX_PEERS]; |
274 | 282 | ||
275 | static void handle_reconnects( void ) { | 283 | static void handle_reconnects(void) { |
276 | int i; | 284 | int i; |
277 | for( i=0; i<g_connection_count; ++i ) | 285 | for (i = 0; i < g_connection_count; ++i) |
278 | if( PROXYPEER_NEEDSCONNECT( g_connections[i].state ) ) { | 286 | if (PROXYPEER_NEEDSCONNECT(g_connections[i].state)) { |
279 | int64 newfd = socket_tcp6( ); | 287 | int64 newfd = socket_tcp6(); |
280 | fprintf( stderr, "(Re)connecting to peer..." ); | 288 | fprintf(stderr, "(Re)connecting to peer..."); |
281 | if( newfd < 0 ) continue; /* No socket for you */ | 289 | if (newfd < 0) |
290 | continue; /* No socket for you */ | ||
282 | io_fd(newfd); | 291 | io_fd(newfd); |
283 | if( socket_bind6_reuse(newfd,g_serverip,g_serverport,0) ) { | 292 | if (socket_bind6_reuse(newfd, g_serverip, g_serverport, 0)) { |
284 | io_close( newfd ); | 293 | io_close(newfd); |
285 | continue; | 294 | continue; |
286 | } | 295 | } |
287 | if( socket_connect6(newfd,g_connections[i].ip,g_connections[i].port,0) == -1 && | 296 | if (socket_connect6(newfd, g_connections[i].ip, g_connections[i].port, 0) == -1 && errno != EINPROGRESS && errno != EWOULDBLOCK) { |
288 | errno != EINPROGRESS && errno != EWOULDBLOCK ) { | ||
289 | close(newfd); | 297 | close(newfd); |
290 | continue; | 298 | continue; |
291 | } | 299 | } |
292 | io_wantwrite(newfd); /* So we will be informed when it is connected */ | 300 | io_wantwrite(newfd); /* So we will be informed when it is connected */ |
293 | io_setcookie(newfd,g_connections+i); | 301 | io_setcookie(newfd, g_connections + i); |
294 | 302 | ||
295 | /* Prepare connection info block */ | 303 | /* Prepare connection info block */ |
296 | reset_info_block( g_connections+i ); | 304 | reset_info_block(g_connections + i); |
297 | g_connections[i].fd = newfd; | 305 | g_connections[i].fd = newfd; |
298 | PROXYPEER_SETCONNECTING( g_connections[i].state ); | 306 | PROXYPEER_SETCONNECTING(g_connections[i].state); |
299 | } | 307 | } |
300 | g_connection_reconn = time(NULL) + 30; | 308 | g_connection_reconn = time(NULL) + 30; |
301 | } | 309 | } |
302 | 310 | ||
303 | /* Handle incoming connection requests, check against whitelist */ | 311 | /* Handle incoming connection requests, check against whitelist */ |
304 | static void handle_accept( int64 serversocket ) { | 312 | static void handle_accept(int64 serversocket) { |
305 | int64 newfd; | 313 | int64 newfd; |
306 | ot_ip6 ip; | 314 | ot_ip6 ip; |
307 | uint16 port; | 315 | uint16 port; |
308 | 316 | ||
309 | while( ( newfd = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) { | 317 | while ((newfd = socket_accept6(serversocket, ip, &port, NULL)) != -1) { |
310 | 318 | ||
311 | /* XXX some access control */ | 319 | /* XXX some access control */ |
312 | 320 | ||
313 | /* Put fd into a non-blocking mode */ | 321 | /* Put fd into a non-blocking mode */ |
314 | io_nonblock( newfd ); | 322 | io_nonblock(newfd); |
315 | 323 | ||
316 | if( !io_fd( newfd ) ) | 324 | if (!io_fd(newfd)) |
317 | io_close( newfd ); | 325 | io_close(newfd); |
318 | else { | 326 | else { |
319 | /* Find a new home for our incoming connection */ | 327 | /* Find a new home for our incoming connection */ |
320 | int i; | 328 | int i; |
321 | for( i=0; i<MAX_PEERS; ++i ) | 329 | for (i = 0; i < MAX_PEERS; ++i) |
322 | if( g_connections[i].state == FLAG_DISCONNECTED ) | 330 | if (g_connections[i].state == FLAG_DISCONNECTED) |
323 | break; | 331 | break; |
324 | if( i == MAX_PEERS ) { | 332 | if (i == MAX_PEERS) { |
325 | fprintf( stderr, "No room for incoming connection." ); | 333 | fprintf(stderr, "No room for incoming connection."); |
326 | close( newfd ); | 334 | close(newfd); |
327 | continue; | 335 | continue; |
328 | } | 336 | } |
329 | 337 | ||
330 | /* Prepare connection info block */ | 338 | /* Prepare connection info block */ |
331 | reset_info_block( g_connections+i ); | 339 | reset_info_block(g_connections + i); |
332 | PROXYPEER_SETCONNECTING( g_connections[i].state ); | 340 | PROXYPEER_SETCONNECTING(g_connections[i].state); |
333 | g_connections[i].port = port; | 341 | g_connections[i].port = port; |
334 | g_connections[i].fd = newfd; | 342 | g_connections[i].fd = newfd; |
335 | 343 | ||
336 | io_setcookie( newfd, g_connections + i ); | 344 | io_setcookie(newfd, g_connections + i); |
337 | 345 | ||
338 | /* We expect the connecting side to begin with its tracker_id */ | 346 | /* We expect the connecting side to begin with its tracker_id */ |
339 | io_wantread( newfd ); | 347 | io_wantread(newfd); |
340 | } | 348 | } |
341 | } | 349 | } |
342 | 350 | ||
@@ -344,117 +352,116 @@ static void handle_accept( int64 serversocket ) { | |||
344 | } | 352 | } |
345 | 353 | ||
346 | /* New sync data on the stream */ | 354 | /* New sync data on the stream */ |
347 | static void handle_read( int64 peersocket ) { | 355 | static void handle_read(int64 peersocket) { |
348 | int i; | 356 | int i; |
349 | int64 datalen; | 357 | int64 datalen; |
350 | uint32_t tracker_id; | 358 | uint32_t tracker_id; |
351 | proxy_peer *peer = io_getcookie( peersocket ); | 359 | proxy_peer *peer = io_getcookie(peersocket); |
352 | 360 | ||
353 | if( !peer ) { | 361 | if (!peer) { |
354 | /* Can't happen ;) */ | 362 | /* Can't happen ;) */ |
355 | io_close( peersocket ); | 363 | io_close(peersocket); |
356 | return; | 364 | return; |
357 | } | 365 | } |
358 | switch( peer->state & FLAG_MASK ) { | 366 | switch (peer->state & FLAG_MASK) { |
359 | case FLAG_DISCONNECTED: | 367 | case FLAG_DISCONNECTED: |
360 | io_close( peersocket ); | 368 | io_close(peersocket); |
361 | break; /* Shouldnt happen */ | 369 | break; /* Shouldnt happen */ |
362 | case FLAG_CONNECTING: | 370 | case FLAG_CONNECTING: |
363 | case FLAG_WAITTRACKERID: | 371 | case FLAG_WAITTRACKERID: |
364 | /* We want at least the first four bytes to come at once, to avoid keeping extra states (for now) | 372 | /* We want at least the first four bytes to come at once, to avoid keeping extra states (for now) |
365 | This also catches 0 bytes reads == EOF and negative values, denoting connection errors */ | 373 | This also catches 0 bytes reads == EOF and negative values, denoting connection errors */ |
366 | if( io_tryread( peersocket, (void*)&tracker_id, sizeof( tracker_id ) ) != sizeof( tracker_id ) ) | 374 | if (io_tryread(peersocket, (void *)&tracker_id, sizeof(tracker_id)) != sizeof(tracker_id)) |
367 | goto close_socket; | 375 | goto close_socket; |
368 | 376 | ||
369 | /* See, if we already have a connection to that peer */ | 377 | /* See, if we already have a connection to that peer */ |
370 | for( i=0; i<MAX_PEERS; ++i ) | 378 | for (i = 0; i < MAX_PEERS; ++i) |
371 | if( ( g_connections[i].state & FLAG_MASK ) == FLAG_CONNECTED && | 379 | if ((g_connections[i].state & FLAG_MASK) == FLAG_CONNECTED && g_connections[i].tracker_id == tracker_id) { |
372 | g_connections[i].tracker_id == tracker_id ) { | 380 | fprintf(stderr, "Peer already connected. Closing connection.\n"); |
373 | fprintf( stderr, "Peer already connected. Closing connection.\n" ); | ||
374 | goto close_socket; | 381 | goto close_socket; |
375 | } | 382 | } |
376 | 383 | ||
377 | /* Also no need for soliloquy */ | 384 | /* Also no need for soliloquy */ |
378 | if( tracker_id == g_tracker_id ) | 385 | if (tracker_id == g_tracker_id) |
379 | goto close_socket; | 386 | goto close_socket; |
380 | 387 | ||
381 | /* The new connection is good, send our tracker_id on incoming connections */ | 388 | /* The new connection is good, send our tracker_id on incoming connections */ |
382 | if( peer->state == FLAG_CONNECTING ) | 389 | if (peer->state == FLAG_CONNECTING) |
383 | if( io_trywrite( peersocket, (void*)&g_tracker_id, sizeof( g_tracker_id ) ) != sizeof( g_tracker_id ) ) | 390 | if (io_trywrite(peersocket, (void *)&g_tracker_id, sizeof(g_tracker_id)) != sizeof(g_tracker_id)) |
384 | goto close_socket; | 391 | goto close_socket; |
385 | 392 | ||
386 | peer->tracker_id = tracker_id; | 393 | peer->tracker_id = tracker_id; |
387 | PROXYPEER_SETCONNECTED( peer->state ); | 394 | PROXYPEER_SETCONNECTED(peer->state); |
388 | 395 | ||
389 | if( peer->state & FLAG_OUTGOING ) | 396 | if (peer->state & FLAG_OUTGOING) |
390 | fprintf( stderr, "succeeded.\n" ); | 397 | fprintf(stderr, "succeeded.\n"); |
391 | else | 398 | else |
392 | fprintf( stderr, "Incoming connection successful.\n" ); | 399 | fprintf(stderr, "Incoming connection successful.\n"); |
393 | 400 | ||
394 | break; | 401 | break; |
395 | close_socket: | 402 | close_socket: |
396 | fprintf( stderr, "Handshake incomplete, closing socket\n" ); | 403 | fprintf(stderr, "Handshake incomplete, closing socket\n"); |
397 | io_close( peersocket ); | 404 | io_close(peersocket); |
398 | reset_info_block( peer ); | 405 | reset_info_block(peer); |
399 | break; | 406 | break; |
400 | case FLAG_CONNECTED: | 407 | case FLAG_CONNECTED: |
401 | /* Here we acutally expect data from peer | 408 | /* Here we acutally expect data from peer |
402 | indata_length should be less than 20+256*7 bytes, for incomplete torrent entries */ | 409 | indata_length should be less than 20+256*7 bytes, for incomplete torrent entries */ |
403 | datalen = io_tryread( peersocket, (void*)(peer->indata + peer->indata_length), sizeof( peer->indata ) - peer->indata_length ); | 410 | datalen = io_tryread(peersocket, (void *)(peer->indata + peer->indata_length), sizeof(peer->indata) - peer->indata_length); |
404 | if( !datalen || datalen < -1 ) { | 411 | if (!datalen || datalen < -1) { |
405 | fprintf( stderr, "Connection closed by remote peer.\n" ); | 412 | fprintf(stderr, "Connection closed by remote peer.\n"); |
406 | io_close( peersocket ); | 413 | io_close(peersocket); |
407 | reset_info_block( peer ); | 414 | reset_info_block(peer); |
408 | } else if( datalen > 0 ) { | 415 | } else if (datalen > 0) { |
409 | peer->indata_length += datalen; | 416 | peer->indata_length += datalen; |
410 | process_indata( peer ); | 417 | process_indata(peer); |
411 | } | 418 | } |
412 | break; | 419 | break; |
413 | } | 420 | } |
414 | } | 421 | } |
415 | 422 | ||
416 | /* Can write new sync data to the stream */ | 423 | /* Can write new sync data to the stream */ |
417 | static void handle_write( int64 peersocket ) { | 424 | static void handle_write(int64 peersocket) { |
418 | proxy_peer *peer = io_getcookie( peersocket ); | 425 | proxy_peer *peer = io_getcookie(peersocket); |
419 | 426 | ||
420 | if( !peer ) { | 427 | if (!peer) { |
421 | /* Can't happen ;) */ | 428 | /* Can't happen ;) */ |
422 | io_close( peersocket ); | 429 | io_close(peersocket); |
423 | return; | 430 | return; |
424 | } | 431 | } |
425 | 432 | ||
426 | switch( peer->state & FLAG_MASK ) { | 433 | switch (peer->state & FLAG_MASK) { |
427 | case FLAG_DISCONNECTED: | 434 | case FLAG_DISCONNECTED: |
428 | default: /* Should not happen */ | 435 | default: /* Should not happen */ |
429 | io_close( peersocket ); | 436 | io_close(peersocket); |
430 | break; | 437 | break; |
431 | case FLAG_CONNECTING: | 438 | case FLAG_CONNECTING: |
432 | /* Ensure that the connection is established and handle connection error */ | 439 | /* Ensure that the connection is established and handle connection error */ |
433 | if( peer->state & FLAG_OUTGOING && !socket_connected( peersocket ) ) { | 440 | if (peer->state & FLAG_OUTGOING && !socket_connected(peersocket)) { |
434 | fprintf( stderr, "failed\n" ); | 441 | fprintf(stderr, "failed\n"); |
435 | reset_info_block( peer ); | 442 | reset_info_block(peer); |
436 | io_close( peersocket ); | 443 | io_close(peersocket); |
437 | break; | 444 | break; |
438 | } | 445 | } |
439 | 446 | ||
440 | if( io_trywrite( peersocket, (void*)&g_tracker_id, sizeof( g_tracker_id ) ) == sizeof( g_tracker_id ) ) { | 447 | if (io_trywrite(peersocket, (void *)&g_tracker_id, sizeof(g_tracker_id)) == sizeof(g_tracker_id)) { |
441 | PROXYPEER_SETWAITTRACKERID( peer->state ); | 448 | PROXYPEER_SETWAITTRACKERID(peer->state); |
442 | io_dontwantwrite( peersocket ); | 449 | io_dontwantwrite(peersocket); |
443 | io_wantread( peersocket ); | 450 | io_wantread(peersocket); |
444 | } else { | 451 | } else { |
445 | fprintf( stderr, "Handshake incomplete, closing socket\n" ); | 452 | fprintf(stderr, "Handshake incomplete, closing socket\n"); |
446 | io_close( peersocket ); | 453 | io_close(peersocket); |
447 | reset_info_block( peer ); | 454 | reset_info_block(peer); |
448 | } | 455 | } |
449 | break; | 456 | break; |
450 | case FLAG_CONNECTED: | 457 | case FLAG_CONNECTED: |
451 | switch( iob_send( peersocket, &peer->outdata ) ) { | 458 | switch (iob_send(peersocket, &peer->outdata)) { |
452 | case 0: /* all data sent */ | 459 | case 0: /* all data sent */ |
453 | io_dontwantwrite( peersocket ); | 460 | io_dontwantwrite(peersocket); |
454 | break; | 461 | break; |
455 | case -3: /* an error occured */ | 462 | case -3: /* an error occured */ |
456 | io_close( peersocket ); | 463 | io_close(peersocket); |
457 | reset_info_block( peer ); | 464 | reset_info_block(peer); |
458 | break; | 465 | break; |
459 | default: /* Normal operation or eagain */ | 466 | default: /* Normal operation or eagain */ |
460 | break; | 467 | break; |
@@ -469,290 +476,324 @@ static void server_mainloop() { | |||
469 | int64 sock; | 476 | int64 sock; |
470 | 477 | ||
471 | /* inlined livesync_init() */ | 478 | /* inlined livesync_init() */ |
472 | memset( g_peerbuffer_start, 0, sizeof( g_peerbuffer_start ) ); | 479 | memset(g_peerbuffer_start, 0, sizeof(g_peerbuffer_start)); |
473 | g_peerbuffer_pos = g_peerbuffer_start; | 480 | g_peerbuffer_pos = g_peerbuffer_start; |
474 | memcpy( g_peerbuffer_pos, &g_tracker_id, sizeof( g_tracker_id ) ); | 481 | memcpy(g_peerbuffer_pos, &g_tracker_id, sizeof(g_tracker_id)); |
475 | uint32_pack_big( (char*)g_peerbuffer_pos + sizeof( g_tracker_id ), OT_SYNC_PEER); | 482 | uint32_pack_big((char *)g_peerbuffer_pos + sizeof(g_tracker_id), OT_SYNC_PEER); |
476 | g_peerbuffer_pos += sizeof( g_tracker_id ) + sizeof( uint32_t); | 483 | g_peerbuffer_pos += sizeof(g_tracker_id) + sizeof(uint32_t); |
477 | g_next_packet_time = time(NULL) + LIVESYNC_MAXDELAY; | 484 | g_next_packet_time = time(NULL) + LIVESYNC_MAXDELAY; |
478 | 485 | ||
479 | while(1) { | 486 | while (1) { |
480 | /* See, if we need to connect to anyone */ | 487 | /* See if we need to connect to anyone */ |
481 | if( time(NULL) > g_connection_reconn ) | 488 | if (time(NULL) > g_connection_reconn) |
482 | handle_reconnects( ); | 489 | handle_reconnects(); |
483 | 490 | ||
484 | /* Wait for io events until next approx reconn check time */ | 491 | /* Wait for io events until next approx reconn check time */ |
485 | io_waituntil2( 30*1000 ); | 492 | io_waituntil2(30 * 1000); |
486 | 493 | ||
487 | /* Loop over readable sockets */ | 494 | /* Loop over readable sockets */ |
488 | while( ( sock = io_canread( ) ) != -1 ) { | 495 | while ((sock = io_canread()) != -1) { |
489 | const void *cookie = io_getcookie( sock ); | 496 | const void *cookie = io_getcookie(sock); |
490 | if( (uintptr_t)cookie == FLAG_SERVERSOCKET ) | 497 | if ((uintptr_t)cookie == FLAG_SERVERSOCKET) |
491 | handle_accept( sock ); | 498 | handle_accept(sock); |
492 | else | 499 | else |
493 | handle_read( sock ); | 500 | handle_read(sock); |
494 | } | 501 | } |
495 | 502 | ||
496 | /* Loop over writable sockets */ | 503 | /* Loop over writable sockets */ |
497 | while( ( sock = io_canwrite( ) ) != -1 ) | 504 | while ((sock = io_canwrite()) != -1) |
498 | handle_write( sock ); | 505 | handle_write(sock); |
499 | 506 | ||
500 | livesync_ticker( ); | 507 | livesync_ticker(); |
501 | } | 508 | } |
502 | } | 509 | } |
503 | 510 | ||
/* Report the name of the failing routine together with the current
   errno message on stderr, then abort with the diet-libc conventional
   exit code 111 ("temporary failure"). */
static void panic(const char *routine) {
  fprintf(stderr, "%s: %s\n", routine, strerror(errno));
  exit(111);
}
508 | 515 | ||
509 | static int64_t ot_try_bind( ot_ip6 ip, uint16_t port ) { | 516 | static int64_t ot_try_bind(ot_ip6 ip, uint16_t port) { |
510 | int64 sock = socket_tcp6( ); | 517 | int64 sock = socket_tcp6(); |
511 | 518 | ||
512 | if( socket_bind6_reuse( sock, ip, port, 0 ) == -1 ) | 519 | if (socket_bind6_reuse(sock, ip, port, 0) == -1) |
513 | panic( "socket_bind6_reuse" ); | 520 | panic("socket_bind6_reuse"); |
514 | 521 | ||
515 | if( socket_listen( sock, SOMAXCONN) == -1 ) | 522 | if (socket_listen(sock, SOMAXCONN) == -1) |
516 | panic( "socket_listen" ); | 523 | panic("socket_listen"); |
517 | 524 | ||
518 | if( !io_fd( sock ) ) | 525 | if (!io_fd(sock)) |
519 | panic( "io_fd" ); | 526 | panic("io_fd"); |
520 | 527 | ||
521 | io_setcookie( sock, (void*)FLAG_SERVERSOCKET ); | 528 | io_setcookie(sock, (void *)FLAG_SERVERSOCKET); |
522 | io_wantread( sock ); | 529 | io_wantread(sock); |
523 | return sock; | 530 | return sock; |
524 | } | 531 | } |
525 | 532 | ||
526 | 533 | static int scan_ip6_port(const char *src, ot_ip6 ip, uint16 *port) { | |
527 | static int scan_ip6_port( const char *src, ot_ip6 ip, uint16 *port ) { | ||
528 | const char *s = src; | 534 | const char *s = src; |
529 | int off, bracket = 0; | 535 | int off, bracket = 0; |
530 | while( isspace(*s) ) ++s; | 536 | while (isspace(*s)) |
531 | if( *s == '[' ) ++s, ++bracket; /* for v6 style notation */ | 537 | ++s; |
532 | if( !(off = scan_ip6( s, ip ) ) ) | 538 | if (*s == '[') |
539 | ++s, ++bracket; /* for v6 style notation */ | ||
540 | if (!(off = scan_ip6(s, ip))) | ||
533 | return 0; | 541 | return 0; |
534 | s += off; | 542 | s += off; |
535 | if( *s == 0 || isspace(*s)) return s-src; | 543 | if (*s == 0 || isspace(*s)) |
536 | if( *s == ']' && bracket ) ++s; | 544 | return s - src; |
537 | if( !ip6_isv4mapped(ip)){ | 545 | if (*s == ']' && bracket) |
538 | if( ( bracket && *(s) != ':' ) || ( *(s) != '.' ) ) return 0; | 546 | ++s; |
547 | if (!ip6_isv4mapped(ip)) { | ||
548 | if ((bracket && *(s) != ':') || (*(s) != '.')) | ||
549 | return 0; | ||
539 | s++; | 550 | s++; |
540 | } else { | 551 | } else { |
541 | if( *(s++) != ':' ) return 0; | 552 | if (*(s++) != ':') |
553 | return 0; | ||
542 | } | 554 | } |
543 | if( !(off = scan_ushort (s, port ) ) ) | 555 | if (!(off = scan_ushort(s, port))) |
544 | return 0; | 556 | return 0; |
545 | return off+s-src; | 557 | return off + s - src; |
546 | } | 558 | } |
547 | 559 | ||
/* Entry point of the proxy: seeds the tracker id, parses the command
   line (-l listen address, -c outgoing connection, -L livesync mcast
   address), starts the two sync worker threads and enters the io loop. */
int main(int argc, char **argv) {
  static pthread_t sync_in_thread_id;
  static pthread_t sync_out_thread_id;
  ot_ip6           serverip;
  uint16_t         tmpport;
  int              scanon = 1, lbound = 0, sbound = 0;

  /* A random tracker id lets us recognise (and drop) our own
     multicast packets */
  srandom(time(NULL));
#ifdef WANT_ARC4RANDOM
  g_tracker_id = arc4random();
#else
  g_tracker_id = random();
#endif

  while (scanon) {
    switch (getopt(argc, argv, ":l:c:L:h")) {
    case -1:
      scanon = 0;
      break;
    case 'l':
      /* -l ip:port — bind a streamsync listening socket */
      tmpport = 0;
      if (!scan_ip6_port(optarg, serverip, &tmpport) || !tmpport) {
        usage(argv[0]);
        exit(1);
      }
      ot_try_bind(serverip, tmpport);
      ++sbound;
      break;
    case 'c':
      /* -c ip:port — schedule an outgoing streamsync connection */
      if (g_connection_count > MAX_PEERS / 2)
        exerr("Connection limit exceeded.\n");
      tmpport = 0;
      if (!scan_ip6_port(optarg, g_connections[g_connection_count].ip, &g_connections[g_connection_count].port) || !g_connections[g_connection_count].port) {
        usage(argv[0]);
        exit(1);
      }
      g_connections[g_connection_count++].state = FLAG_OUTGOING;
      break;
    case 'L':
      /* -L ip[:port] — join the livesync multicast group (default port 9696) */
      tmpport = 9696;
      if (!scan_ip6_port(optarg, serverip, &tmpport) || !tmpport) {
        usage(argv[0]);
        exit(1);
      }
      livesync_bind_mcast(serverip, tmpport);
      ++lbound;
      break;
    default:
    case '?':
      usage(argv[0]);
      exit(1);
    }
  }

  /* Refuse to run without a livesync port and at least one streamsync
     endpoint (either listening or outgoing) */
  if (!lbound)
    exerr("No livesync port bound.");
  if (!g_connection_count && !sbound)
    exerr("No streamsync port bound.");
  pthread_create(&sync_in_thread_id, NULL, livesync_worker, NULL);
  pthread_create(&sync_out_thread_id, NULL, streamsync_worker, NULL);

  server_mainloop();
  return 0;
}
598 | 624 | ||
/* Worker thread: periodically serialises every bucket of the torrent
   store into up to three packet blocks (type 1: exactly one peer per
   torrent, type 2: exactly two, type 0: explicit varint peer count) and
   queues the result on every connected streamsync peer.  The bucket is
   emptied (peer lists freed) after serialisation. */
static void *streamsync_worker(void *args) {
  (void)args;
  while (1) {
    int bucket;
    /* For each bucket... */
    for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
      /* Get exclusive access to that bucket */
      ot_vector *torrents_list = mutex_bucket_lock(bucket);
      size_t     tor_offset, count_def = 0, count_one = 0, count_two = 0, count_peers = 0;
      size_t     mem, mem_a = 0, mem_b = 0;
      uint8_t   *ptr = 0, *ptr_a, *ptr_b, *ptr_c;

      if (!torrents_list->size)
        goto unlock_continue;

      /* First pass: count torrents by peer count so we know which of
         the three block types to emit and how much memory to reserve */
      for (tor_offset = 0; tor_offset < torrents_list->size; ++tor_offset) {
        /* Address torrents members */
        ot_peerlist *peer_list = (((ot_torrent *)(torrents_list->data))[tor_offset]).peer_list;
        switch (peer_list->peer_count) {
        case 2:
          count_two++;
          break;
        case 1:
          count_one++;
          break;
        case 0:
          break;
        default:
          count_def++;
          count_peers += peer_list->peer_count;
        }
      }

      /* Maximal memory requirement: max 3 blocks, max torrents * 20 + max peers * 7 */
      mem = 3 * (1 + 1 + 2) + (count_one + count_two) * (19 + 1) + count_def * (19 + 8) + (count_one + 2 * count_two + count_peers) * 7;

      fprintf(stderr, "Mem: %zd\n", mem);

      ptr = ptr_a = ptr_b = ptr_c = malloc(mem);
      if (!ptr)
        goto unlock_continue;

      /* Type-1 block is only worth its 4-byte header when it saves
         space (>4 single-peer torrents) or nothing else would be sent;
         otherwise those torrents go into the generic type-0 block */
      if (count_one > 4 || !count_def) {
        mem_a = 1 + 1 + 2 + count_one * (19 + 7);
        ptr_b += mem_a;
        ptr_c += mem_a;
        ptr_a[0] = 1;                                     /* Offset 0: packet type 1 */
        ptr_a[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */
        ptr_a[2] = count_one >> 8;
        ptr_a[3] = count_one & 255;
        ptr_a += 4;
      } else
        count_def += count_one;

      /* Same trade-off for the type-2 (two peers per torrent) block */
      if (count_two > 4 || !count_def) {
        mem_b = 1 + 1 + 2 + count_two * (19 + 14);
        ptr_c += mem_b;
        ptr_b[0] = 2;                                     /* Offset 0: packet type 2 */
        ptr_b[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */
        ptr_b[2] = count_two >> 8;
        ptr_b[3] = count_two & 255;
        ptr_b += 4;
      } else
        count_def += count_two;

      if (count_def) {
        ptr_c[0] = 0;                                     /* Offset 0: packet type 0 */
        ptr_c[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */
        ptr_c[2] = count_def >> 8;
        ptr_c[3] = count_def & 255;
        ptr_c += 4;
      }

      /* Second pass: serialise each torrent into its block */
      for (tor_offset = 0; tor_offset < torrents_list->size; ++tor_offset) {
        /* Address torrents members */
        ot_torrent  *torrent   = ((ot_torrent *)(torrents_list->data)) + tor_offset;
        ot_peerlist *peer_list = torrent->peer_list;
        ot_peer     *peers     = (ot_peer *)(peer_list->peers.data);
        uint8_t    **dst;

        /* Determine destination slot */
        count_peers = peer_list->peer_count;
        switch (count_peers) {
        case 0:
          continue;
        case 1:
          dst = mem_a ? &ptr_a : &ptr_c;
          break;
        case 2:
          dst = mem_b ? &ptr_b : &ptr_c;
          break;
        default:
          dst = &ptr_c;
          break;
        }

        /* Copy tail of info_hash, advance pointer.  The first byte is
           implied by the bucket's shared prefix written above. */
        memcpy(*dst, ((uint8_t *)torrent->hash) + 1, sizeof(ot_hash) - 1);
        *dst += sizeof(ot_hash) - 1;

        /* Encode peer count as a little-endian 7-bit varint — only the
           generic type-0 block carries explicit counts */
        if (dst == &ptr_c)
          while (count_peers) {
            if (count_peers <= 0x7f)
              *(*dst)++ = count_peers;
            else
              *(*dst)++ = 0x80 | (count_peers & 0x7f);
            count_peers >>= 7;
          }

        /* Copy peers */
        count_peers = peer_list->peer_count;
        while (count_peers--) {
          memcpy(*dst, peers++, OT_IP_SIZE + 3);
          *dst += OT_IP_SIZE + 3;
        }
        free_peerlist(peer_list);
      }

      /* The bucket has been drained; release its backing storage */
      free(torrents_list->data);
      memset(torrents_list, 0, sizeof(*torrents_list));
    unlock_continue:
      mutex_bucket_unlock(bucket, 0);

      if (ptr) {
        int i;

        /* Actual payload ends at the furthest-advanced write pointer */
        if (ptr_b > ptr_c)
          ptr_c = ptr_b;
        if (ptr_a > ptr_c)
          ptr_c = ptr_a;
        mem = ptr_c - ptr;

        /* Hand a private copy of the payload to every connected peer */
        for (i = 0; i < MAX_PEERS; ++i) {
          if (PROXYPEER_ISCONNECTED(g_connections[i].state)) {
            void *tmp = malloc(mem);
            if (tmp) {
              memcpy(tmp, ptr, mem);
              iob_addbuf_free(&g_connections[i].outdata, tmp, mem);
              io_wantwrite(g_connections[i].fd);
            }
          }
        }

        free(ptr);
      }
      usleep(OT_SYNC_SLEEP);
    }
  }
  return 0;
}
735 | 778 | ||
736 | static void livesync_issue_peersync( ) { | 779 | static void livesync_issue_peersync() { |
737 | socket_send4(g_socket_out, (char*)g_peerbuffer_start, g_peerbuffer_pos - g_peerbuffer_start, | 780 | socket_send4(g_socket_out, (char *)g_peerbuffer_start, g_peerbuffer_pos - g_peerbuffer_start, groupip_1, LIVESYNC_PORT); |
738 | groupip_1, LIVESYNC_PORT); | 781 | g_peerbuffer_pos = g_peerbuffer_start + sizeof(g_tracker_id) + sizeof(uint32_t); |
739 | g_peerbuffer_pos = g_peerbuffer_start + sizeof( g_tracker_id ) + sizeof( uint32_t ); | ||
740 | g_next_packet_time = time(NULL) + LIVESYNC_MAXDELAY; | 782 | g_next_packet_time = time(NULL) + LIVESYNC_MAXDELAY; |
741 | } | 783 | } |
742 | 784 | ||
743 | void livesync_ticker( ) { | 785 | void livesync_ticker() { |
744 | /* livesync_issue_peersync sets g_next_packet_time */ | 786 | /* livesync_issue_peersync sets g_next_packet_time */ |
745 | if( time(NULL) > g_next_packet_time && | 787 | if (time(NULL) > g_next_packet_time && g_peerbuffer_pos > g_peerbuffer_start + sizeof(g_tracker_id)) |
746 | g_peerbuffer_pos > g_peerbuffer_start + sizeof( g_tracker_id ) ) | ||
747 | livesync_issue_peersync(); | 788 | livesync_issue_peersync(); |
748 | } | 789 | } |
749 | 790 | ||
750 | static void livesync_proxytell( uint8_t prefix, uint8_t *info_hash, uint8_t *peer ) { | 791 | static void livesync_proxytell(uint8_t prefix, uint8_t *info_hash, uint8_t *peer) { |
751 | // unsigned int i; | 792 | // unsigned int i; |
752 | 793 | ||
753 | *g_peerbuffer_pos = prefix; | 794 | *g_peerbuffer_pos = prefix; |
754 | memcpy( g_peerbuffer_pos + 1, info_hash, sizeof(ot_hash) - 1 ); | 795 | memcpy(g_peerbuffer_pos + 1, info_hash, sizeof(ot_hash) - 1); |
755 | memcpy( g_peerbuffer_pos + sizeof(ot_hash), peer, sizeof(ot_peer) - 1 ); | 796 | memcpy(g_peerbuffer_pos + sizeof(ot_hash), peer, sizeof(ot_peer) - 1); |
756 | 797 | ||
757 | #if 0 | 798 | #if 0 |
758 | /* Dump info_hash */ | 799 | /* Dump info_hash */ |
@@ -767,77 +808,84 @@ static void livesync_proxytell( uint8_t prefix, uint8_t *info_hash, uint8_t *pee | |||
767 | #endif | 808 | #endif |
768 | g_peerbuffer_pos += sizeof(ot_peer); | 809 | g_peerbuffer_pos += sizeof(ot_peer); |
769 | 810 | ||
770 | if( g_peerbuffer_pos >= g_peerbuffer_highwater ) | 811 | if (g_peerbuffer_pos >= g_peerbuffer_highwater) |
771 | livesync_issue_peersync(); | 812 | livesync_issue_peersync(); |
772 | } | 813 | } |
773 | 814 | ||
/* Parse all complete sync packets accumulated in peer->indata and feed
   every contained peer into the livesync buffer.  Incomplete trailing
   data is shifted to the front of the buffer and kept for the next
   read. */
static void process_indata(proxy_peer *peer) {
  size_t   consumed, peers;
  uint8_t *data    = peer->indata, *hash;
  uint8_t *dataend = data + peer->indata_length;

  while (1) {
    /* If we're not inside of a packet, make a new one */
    if (!peer->packet_tcount) {
      /* Ensure the header is complete or postpone processing */
      if (data + 4 > dataend)
        break;
      peer->packet_type    = data[0];
      peer->packet_tprefix = data[1];
      peer->packet_tcount  = data[2] * 256 + data[3];
      data += 4;
      printf("type: %hhu, prefix: %02X, torrentcount: %zd\n", peer->packet_type, peer->packet_tprefix, peer->packet_tcount);
    }

    /* Ensure size for a minimal torrent block */
    if (data + sizeof(ot_hash) + OT_IP_SIZE + 3 > dataend)
      break;

    /* Advance pointer to peer count or peers */
    hash = data;
    data += sizeof(ot_hash) - 1;

    /* Type 0 has peer count encoded before each peers; types 1 and 2
       imply the count directly.  The count is a little-endian 7-bit
       varint: a set high bit means another byte follows (max 7 bytes). */
    peers = peer->packet_type;
    if (!peers) {
      int shift = 0;
      do
        peers |= (0x7f & *data) << (7 * shift);
      while (*(data++) & 0x80 && shift++ < 6);
    }
#if 0
    printf( "peers: %zd\n", peers );
#endif
    /* Ensure enough data being read to hold all peers; otherwise rewind
       to the start of this torrent block and wait for more input */
    if (data + (OT_IP_SIZE + 3) * peers > dataend) {
      data = hash;
      break;
    }
    while (peers--) {
      livesync_proxytell(peer->packet_tprefix, hash, data);
      data += OT_IP_SIZE + 3;
    }
    --peer->packet_tcount;
  }

  /* Keep the unconsumed tail for the next call */
  consumed = data - peer->indata;
  memmove(peer->indata, data, peer->indata_length - consumed);
  peer->indata_length -= consumed;
}
824 | 868 | ||
825 | static void * livesync_worker( void * args ) { | 869 | static void *livesync_worker(void *args) { |
826 | (void)args; | 870 | (void)args; |
827 | while( 1 ) { | 871 | while (1) { |
828 | ot_ip6 in_ip; uint16_t in_port; | 872 | ot_ip6 in_ip; |
829 | size_t datalen = socket_recv4(g_socket_in, (char*)g_inbuffer, LIVESYNC_INCOMING_BUFFSIZE, 12+(char*)in_ip, &in_port); | 873 | uint16_t in_port; |
874 | size_t datalen = socket_recv4(g_socket_in, (char *)g_inbuffer, LIVESYNC_INCOMING_BUFFSIZE, 12 + (char *)in_ip, &in_port); | ||
830 | 875 | ||
831 | /* Expect at least tracker id and packet type */ | 876 | /* Expect at least tracker id and packet type */ |
832 | if( datalen <= (ssize_t)(sizeof( g_tracker_id ) + sizeof( uint32_t )) ) | 877 | if (datalen <= (ssize_t)(sizeof(g_tracker_id) + sizeof(uint32_t))) |
833 | continue; | 878 | continue; |
834 | if( !memcmp( g_inbuffer, &g_tracker_id, sizeof( g_tracker_id ) ) ) { | 879 | if (!memcmp(g_inbuffer, &g_tracker_id, sizeof(g_tracker_id))) { |
835 | /* drop packet coming from ourselves */ | 880 | /* drop packet coming from ourselves */ |
836 | continue; | 881 | continue; |
837 | } | 882 | } |
838 | switch( uint32_read_big( sizeof( g_tracker_id ) + (char*)g_inbuffer ) ) { | 883 | switch (uint32_read_big((char *)g_inbuffer + sizeof(g_tracker_id))) { |
839 | case OT_SYNC_PEER: | 884 | case OT_SYNC_PEER4: |
840 | livesync_handle_peersync( datalen ); | 885 | livesync_handle_peersync(datalen, OT_PEER_SIZE4); |
886 | break; | ||
887 | case OT_SYNC_PEER6: | ||
888 | livesync_handle_peersync(datalen, OT_PEER_SIZE6); | ||
841 | break; | 889 | break; |
842 | default: | 890 | default: |
843 | // fprintf( stderr, "Received an unknown live sync packet type %u.\n", uint32_read_big( sizeof( g_tracker_id ) + (char*)g_inbuffer ) ); | 891 | // fprintf( stderr, "Received an unknown live sync packet type %u.\n", uint32_read_big( sizeof( g_tracker_id ) + (char*)g_inbuffer ) ); |
diff --git a/scan_urlencoded_query.c b/scan_urlencoded_query.c index a4f89c2..38d544a 100644 --- a/scan_urlencoded_query.c +++ b/scan_urlencoded_query.c | |||
@@ -45,37 +45,45 @@ static const unsigned char is_unreserved[256] = { | |||
45 | 45 | ||
46 | /* Do a fast nibble to hex representation conversion */ | 46 | /* Do a fast nibble to hex representation conversion */ |
/* Convert a single hex digit ('0'-'9', 'a'-'f', 'A'-'F') to its
   numeric value 0..15; any other character yields 0xff. */
static unsigned char fromhex(unsigned char x) {
  unsigned char v = (unsigned char)(x - '0');

  if (v <= 9)
    return v; /* decimal digit */

  /* Fold lower case onto upper case (clears bit 0x20), then map
     'A'..'F' onto 10..15; everything else falls out of range. */
  v = (unsigned char)((v & (unsigned char)~0x20) - ('A' - '0'));
  return v < 6 ? (unsigned char)(v + 10) : 0xff;
}
53 | 57 | ||
54 | /* Skip the value of a param=value pair */ | 58 | /* Skip the value of a param=value pair */ |
55 | void scan_urlencoded_skipvalue( char **string ) { | 59 | void scan_urlencoded_skipvalue(char **string) { |
56 | const unsigned char* s=*(const unsigned char**) string; | 60 | const unsigned char *s = *(const unsigned char **)string; |
57 | unsigned char f; | 61 | unsigned char f; |
58 | 62 | ||
59 | /* Since we are asked to skip the 'value', we assume to stop at | 63 | /* Since we are asked to skip the 'value', we assume to stop at |
60 | terminators for a 'value' string position */ | 64 | terminators for a 'value' string position */ |
61 | while( ( f = is_unreserved[ *s++ ] ) & SCAN_SEARCHPATH_VALUE ); | 65 | while ((f = is_unreserved[*s++]) & SCAN_SEARCHPATH_VALUE) |
66 | ; | ||
62 | 67 | ||
63 | /* If we stopped at a hard terminator like \0 or \n, make the | 68 | /* If we stopped at a hard terminator like \0 or \n, make the |
64 | next scan_urlencoded_query encounter it again */ | 69 | next scan_urlencoded_query encounter it again */ |
65 | if( f & SCAN_SEARCHPATH_TERMINATOR ) --s; | 70 | if (f & SCAN_SEARCHPATH_TERMINATOR) |
71 | --s; | ||
66 | 72 | ||
67 | *string = (char*)s; | 73 | *string = (char *)s; |
68 | } | 74 | } |
69 | 75 | ||
70 | int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCHPATH_FLAG flags) { | 76 | int scan_find_keywords(const ot_keywords *keywords, char **string, SCAN_SEARCHPATH_FLAG flags) { |
71 | char *deststring = *string; | 77 | char *deststring = *string; |
72 | ssize_t match_length = scan_urlencoded_query(string, deststring, flags ); | 78 | ssize_t match_length = scan_urlencoded_query(string, deststring, flags); |
73 | 79 | ||
74 | if( match_length < 0 ) return match_length; | 80 | if (match_length < 0) |
75 | if( match_length == 0 ) return -3; | 81 | return match_length; |
82 | if (match_length == 0) | ||
83 | return -3; | ||
76 | 84 | ||
77 | while( keywords->key ) { | 85 | while (keywords->key) { |
78 | if( !strncmp( keywords->key, deststring, match_length ) && !keywords->key[match_length] ) | 86 | if (!strncmp(keywords->key, deststring, match_length) && !keywords->key[match_length]) |
79 | return keywords->value; | 87 | return keywords->value; |
80 | keywords++; | 88 | keywords++; |
81 | } | 89 | } |
@@ -84,60 +92,73 @@ int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCH | |||
84 | } | 92 | } |
85 | 93 | ||
86 | ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_FLAG flags) { | 94 | ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_FLAG flags) { |
87 | const unsigned char* s=*(const unsigned char**) string; | 95 | const unsigned char *s = *(const unsigned char **)string; |
88 | unsigned char *d = (unsigned char*)deststring; | 96 | unsigned char *d = (unsigned char *)deststring; |
89 | unsigned char b, c; | 97 | unsigned char b, c; |
90 | 98 | ||
91 | /* This is the main decoding loop. | 99 | /* This is the main decoding loop. |
92 | 'flag' determines which characters are non-terminating in current context | 100 | 'flag' determines which characters are non-terminating in current context |
93 | (ie. stop at '=' and '&' if scanning for a 'param'; stop at '?' if scanning for the path ) | 101 | (ie. stop at '=' and '&' if scanning for a 'param'; stop at '?' if scanning for the path ) |
94 | */ | 102 | */ |
95 | while( is_unreserved[ c = *s++ ] & flags ) { | 103 | while (is_unreserved[c = *s++] & flags) { |
96 | 104 | ||
97 | /* When encountering an url escaped character, try to decode */ | 105 | /* When encountering an url escaped character, try to decode */ |
98 | if( c=='%') { | 106 | if (c == '%') { |
99 | if( ( b = fromhex(*s++) ) == 0xff ) return -1; | 107 | if ((b = fromhex(*s++)) == 0xff) |
100 | if( ( c = fromhex(*s++) ) == 0xff ) return -1; | 108 | return -1; |
101 | c|=(b<<4); | 109 | if ((c = fromhex(*s++)) == 0xff) |
110 | return -1; | ||
111 | c |= (b << 4); | ||
102 | } | 112 | } |
103 | 113 | ||
104 | /* Write (possibly decoded) character to output */ | 114 | /* Write (possibly decoded) character to output */ |
105 | *d++ = c; | 115 | *d++ = c; |
106 | } | 116 | } |
107 | 117 | ||
108 | switch( c ) { | 118 | switch (c) { |
109 | case 0: case '\r': case '\n': case ' ': | 119 | case 0: |
120 | case '\r': | ||
121 | case '\n': | ||
122 | case ' ': | ||
110 | /* If we started scanning on a hard terminator, indicate we've finished */ | 123 | /* If we started scanning on a hard terminator, indicate we've finished */ |
111 | if( d == (unsigned char*)deststring ) return -2; | 124 | if (d == (unsigned char *)deststring) |
125 | return -2; | ||
112 | 126 | ||
113 | /* Else make the next call to scan_urlencoded_param encounter it again */ | 127 | /* Else make the next call to scan_urlencoded_param encounter it again */ |
114 | --s; | 128 | --s; |
115 | break; | 129 | break; |
116 | case '?': | 130 | case '?': |
117 | if( flags != SCAN_PATH ) return -1; | 131 | if (flags != SCAN_PATH) |
132 | return -1; | ||
118 | break; | 133 | break; |
119 | case '=': | 134 | case '=': |
120 | if( flags != SCAN_SEARCHPATH_PARAM ) return -1; | 135 | if (flags != SCAN_SEARCHPATH_PARAM) |
136 | return -1; | ||
121 | break; | 137 | break; |
122 | case '&': | 138 | case '&': |
123 | if( flags == SCAN_PATH ) return -1; | 139 | if (flags == SCAN_PATH) |
124 | if( flags == SCAN_SEARCHPATH_PARAM ) --s; | 140 | return -1; |
141 | if (flags == SCAN_SEARCHPATH_PARAM) | ||
142 | --s; | ||
125 | break; | 143 | break; |
126 | default: | 144 | default: |
127 | return -1; | 145 | return -1; |
128 | } | 146 | } |
129 | 147 | ||
130 | *string = (char *)s; | 148 | *string = (char *)s; |
131 | return d - (unsigned char*)deststring; | 149 | return d - (unsigned char *)deststring; |
132 | } | 150 | } |
133 | 151 | ||
134 | ssize_t scan_fixed_int( char *data, size_t len, int *tmp ) { | 152 | ssize_t scan_fixed_int(char *data, size_t len, int *tmp) { |
135 | int minus = 0; | 153 | int minus = 0; |
136 | *tmp = 0; | 154 | *tmp = 0; |
137 | if( *data == '-' ) --len, ++data, ++minus; | 155 | if (*data == '-') |
138 | while( (len > 0) && (*data >= '0') && (*data <= '9') ) { --len; *tmp = 10**tmp + *data++-'0'; } | 156 | --len, ++data, ++minus; |
139 | if( minus ) *tmp = -*tmp; | 157 | while ((len > 0) && (*data >= '0') && (*data <= '9')) { |
158 | --len; | ||
159 | *tmp = 10 * *tmp + *data++ - '0'; | ||
160 | } | ||
161 | if (minus) | ||
162 | *tmp = -*tmp; | ||
140 | return len; | 163 | return len; |
141 | } | 164 | } |
142 | |||
143 | const char *g_version_scan_urlencoded_query_c = "$Source$: $Revision$\n"; | ||
diff --git a/scan_urlencoded_query.h b/scan_urlencoded_query.h index 06b91f5..74246e7 100644 --- a/scan_urlencoded_query.h +++ b/scan_urlencoded_query.h | |||
@@ -38,18 +38,18 @@ ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_F | |||
38 | or -2 for terminator found | 38 | or -2 for terminator found |
39 | or -3 for no keyword matched | 39 | or -3 for no keyword matched |
40 | */ | 40 | */ |
41 | int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCHPATH_FLAG flags); | 41 | int scan_find_keywords(const ot_keywords *keywords, char **string, SCAN_SEARCHPATH_FLAG flags); |
42 | 42 | ||
43 | /* string in: pointer to value of a param=value pair to skip | 43 | /* string in: pointer to value of a param=value pair to skip |
44 | out: pointer to next scan position on return | 44 | out: pointer to next scan position on return |
45 | */ | 45 | */ |
46 | void scan_urlencoded_skipvalue( char **string ); | 46 | void scan_urlencoded_skipvalue(char **string); |
47 | 47 | ||
48 | /* data pointer to len chars of string | 48 | /* data pointer to len chars of string |
49 | len length of chars in data to parse | 49 | len length of chars in data to parse |
50 | number number to receive result | 50 | number number to receive result |
51 | returns number of bytes not parsed, mostly !=0 means fail | 51 | returns number of bytes not parsed, mostly !=0 means fail |
52 | */ | 52 | */ |
53 | ssize_t scan_fixed_int( char *data, size_t len, int *number ); | 53 | ssize_t scan_fixed_int(char *data, size_t len, int *number); |
54 | 54 | ||
55 | #endif | 55 | #endif |
diff --git a/tests/testsuite2.sh b/tests/testsuite2.sh index c9a5a6a..da5181b 100644 --- a/tests/testsuite2.sh +++ b/tests/testsuite2.sh | |||
@@ -2,13 +2,21 @@ | |||
2 | 2 | ||
3 | while true; do | 3 | while true; do |
4 | request_string="GET /announce?info_hash=012345678901234567\ | 4 | request_string="GET /announce?info_hash=012345678901234567\ |
5 | %$(printf %02X $(( $RANDOM & 0xff )) )\ | 5 | $(printf %02X $(( $RANDOM & 0xff )) )\ |
6 | %$(printf %02X $(( $RANDOM & 0xff )) )\ | 6 | &ip=$(( $RANDOM & 0xff )).17.13.15&port=$(( $RANDOM & 0xff )) HTTP/1.0" |
7 | &ip=$(( $RANDOM & 0xff )).17.13.15&port=$(( $RANDOM & 0xff )) HTTP/1.0\n" | 7 | |
8 | 8 | # echo $request_string | |
9 | echo $request_string | 9 | # echo |
10 | echo | 10 | printf "%s\n\n" "$request_string" | nc 84.200.61.9 6969 | hexdump -C |
11 | echo $request_string | nc 23.23.23.237 6969 >/dev/null | 11 | |
12 | echo | 12 | request_string="GET /announce?info_hash=012345678901234567\ |
13 | $(printf %02X $(( $RANDOM & 0xff )) )\ | ||
14 | &ip=2001:1608:6:27::$(( $RANDOM & 0xff ))&port=$(( $RANDOM & 0xff )) HTTP/1.0" | ||
15 | printf "%s\n\n" "$request_string" | nc 2001:1608:6:27::9 6969 | hexdump -C | ||
16 | printf "%s\n\n" "$request_string" | ||
17 | |||
18 | request_string="GET /scrape?info_hash=012345678901234567\ | ||
19 | $(printf %02X $(( $RANDOM & 0xff )) ) HTTP/1.0" | ||
20 | printf "%s\n\n" "$request_string" | nc 2001:1608:6:27::9 6969 | hexdump -C | ||
13 | 21 | ||
14 | done | 22 | done |
diff --git a/trackerlogic.c b/trackerlogic.c index 719f8a2..04df544 100644 --- a/trackerlogic.c +++ b/trackerlogic.c | |||
@@ -4,454 +4,595 @@ | |||
4 | $id$ */ | 4 | $id$ */ |
5 | 5 | ||
6 | /* System */ | 6 | /* System */ |
7 | #include <stdlib.h> | ||
8 | #include <string.h> | ||
9 | #include <stdio.h> | ||
10 | #include <arpa/inet.h> | 7 | #include <arpa/inet.h> |
11 | #include <unistd.h> | ||
12 | #include <errno.h> | 8 | #include <errno.h> |
13 | #include <stdint.h> | 9 | #include <stdint.h> |
10 | #include <stdio.h> | ||
11 | #include <stdlib.h> | ||
12 | #include <string.h> | ||
13 | #include <unistd.h> | ||
14 | 14 | ||
15 | /* Libowfat */ | 15 | /* Libowfat */ |
16 | #include "array.h" | ||
16 | #include "byte.h" | 17 | #include "byte.h" |
17 | #include "io.h" | 18 | #include "io.h" |
18 | #include "iob.h" | 19 | #include "iob.h" |
19 | #include "array.h" | 20 | #include "ip6.h" |
20 | 21 | ||
21 | /* Opentracker */ | 22 | /* Opentracker */ |
22 | #include "trackerlogic.h" | ||
23 | #include "ot_mutex.h" | ||
24 | #include "ot_stats.h" | ||
25 | #include "ot_clean.h" | ||
26 | #include "ot_http.h" | ||
27 | #include "ot_accesslist.h" | 23 | #include "ot_accesslist.h" |
24 | #include "ot_clean.h" | ||
28 | #include "ot_fullscrape.h" | 25 | #include "ot_fullscrape.h" |
26 | #include "ot_http.h" | ||
29 | #include "ot_livesync.h" | 27 | #include "ot_livesync.h" |
28 | #include "ot_mutex.h" | ||
29 | #include "ot_stats.h" | ||
30 | #include "ot_vector.h" | ||
31 | #include "trackerlogic.h" | ||
30 | 32 | ||
31 | /* Forward declaration */ | 33 | /* Forward declaration */ |
32 | size_t return_peers_for_torrent( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ); | 34 | size_t return_peers_for_torrent(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto); |
33 | 35 | ||
34 | void free_peerlist( ot_peerlist *peer_list ) { | 36 | void free_peerlist(ot_peerlist *peer_list) { |
35 | if( peer_list->peers.data ) { | 37 | if (peer_list->peers.data) { |
36 | if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { | 38 | if (OT_PEERLIST_HASBUCKETS(peer_list)) |
37 | ot_vector *bucket_list = (ot_vector*)(peer_list->peers.data); | 39 | vector_clean_list((ot_vector *)peer_list->peers.data, peer_list->peers.size); |
38 | 40 | else | |
39 | while( peer_list->peers.size-- ) | 41 | free(peer_list->peers.data); |
40 | free( bucket_list++->data ); | ||
41 | } | ||
42 | free( peer_list->peers.data ); | ||
43 | } | 42 | } |
44 | free( peer_list ); | 43 | free(peer_list); |
45 | } | 44 | } |
46 | 45 | ||
47 | void add_torrent_from_saved_state( ot_hash hash, ot_time base, size_t down_count ) { | 46 | void add_torrent_from_saved_state(ot_hash const hash, ot_time base, size_t down_count) { |
48 | int exactmatch; | 47 | int exactmatch; |
49 | ot_torrent *torrent; | 48 | ot_torrent *torrent; |
50 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); | 49 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash); |
51 | 50 | ||
52 | if( !accesslist_hashisvalid( hash ) ) | 51 | if (!accesslist_hashisvalid(hash)) |
53 | return mutex_bucket_unlock_by_hash( hash, 0 ); | 52 | return mutex_bucket_unlock_by_hash(hash, 0); |
54 | 53 | ||
55 | torrent = vector_find_or_insert( torrents_list, (void*)hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 54 | torrent = vector_find_or_insert(torrents_list, (void *)hash, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch); |
56 | if( !torrent || exactmatch ) | 55 | if (!torrent || exactmatch) |
57 | return mutex_bucket_unlock_by_hash( hash, 0 ); | 56 | return mutex_bucket_unlock_by_hash(hash, 0); |
58 | 57 | ||
59 | /* Create a new torrent entry, then */ | 58 | /* Create a new torrent entry, then */ |
60 | memcpy( torrent->hash, hash, sizeof(ot_hash) ); | 59 | byte_zero(torrent, sizeof(ot_torrent)); |
60 | memcpy(torrent->hash, hash, sizeof(ot_hash)); | ||
61 | 61 | ||
62 | if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) { | 62 | if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) { |
63 | vector_remove_torrent( torrents_list, torrent ); | 63 | vector_remove_torrent(torrents_list, torrent); |
64 | return mutex_bucket_unlock_by_hash( hash, 0 ); | 64 | return mutex_bucket_unlock_by_hash(hash, 0); |
65 | } | 65 | } |
66 | 66 | ||
67 | byte_zero( torrent->peer_list, sizeof( ot_peerlist ) ); | 67 | byte_zero(torrent->peer_list6, sizeof(ot_peerlist)); |
68 | torrent->peer_list->base = base; | 68 | byte_zero(torrent->peer_list4, sizeof(ot_peerlist)); |
69 | torrent->peer_list->down_count = down_count; | 69 | torrent->peer_list6->base = base; |
70 | torrent->peer_list4->base = base; | ||
71 | torrent->peer_list6->down_count = down_count; | ||
72 | torrent->peer_list4->down_count = down_count; | ||
70 | 73 | ||
71 | return mutex_bucket_unlock_by_hash( hash, 1 ); | 74 | return mutex_bucket_unlock_by_hash(hash, 1); |
72 | } | 75 | } |
73 | 76 | ||
74 | size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount ) { | 77 | size_t add_peer_to_torrent_and_return_peers(PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount) { |
75 | int exactmatch, delta_torrentcount = 0; | 78 | int exactmatch, delta_torrentcount = 0; |
76 | ot_torrent *torrent; | 79 | ot_torrent *torrent; |
77 | ot_peer *peer_dest; | 80 | ot_peer *peer_dest; |
78 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( *ws->hash ); | 81 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(*ws->hash); |
79 | 82 | ot_peerlist *peer_list; | |
80 | if( !accesslist_hashisvalid( *ws->hash ) ) { | 83 | size_t peer_size; /* initialized in next line */ |
81 | mutex_bucket_unlock_by_hash( *ws->hash, 0 ); | 84 | ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size); |
82 | if( proto == FLAG_TCP ) { | 85 | |
86 | if (!accesslist_hashisvalid(*ws->hash)) { | ||
87 | mutex_bucket_unlock_by_hash(*ws->hash, 0); | ||
88 | if (proto == FLAG_TCP) { | ||
83 | const char invalid_hash[] = "d14:failure reason63:Requested download is not authorized for use with this tracker.e"; | 89 | const char invalid_hash[] = "d14:failure reason63:Requested download is not authorized for use with this tracker.e"; |
84 | memcpy( ws->reply, invalid_hash, strlen( invalid_hash ) ); | 90 | memcpy(ws->reply, invalid_hash, strlen(invalid_hash)); |
85 | return strlen( invalid_hash ); | 91 | return strlen(invalid_hash); |
86 | } | 92 | } |
87 | return 0; | 93 | return 0; |
88 | } | 94 | } |
89 | 95 | ||
90 | torrent = vector_find_or_insert( torrents_list, (void*)ws->hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 96 | torrent = vector_find_or_insert(torrents_list, (void *)ws->hash, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch); |
91 | if( !torrent ) { | 97 | if (!torrent) { |
92 | mutex_bucket_unlock_by_hash( *ws->hash, 0 ); | 98 | mutex_bucket_unlock_by_hash(*ws->hash, 0); |
93 | return 0; | 99 | return 0; |
94 | } | 100 | } |
95 | 101 | ||
96 | if( !exactmatch ) { | 102 | if (!exactmatch) { |
97 | /* Create a new torrent entry, then */ | 103 | /* Create a new torrent entry, then */ |
98 | memcpy( torrent->hash, *ws->hash, sizeof(ot_hash) ); | 104 | byte_zero(torrent, sizeof(ot_torrent)); |
105 | memcpy(torrent->hash, *ws->hash, sizeof(ot_hash)); | ||
99 | 106 | ||
100 | if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) { | 107 | if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) { |
101 | vector_remove_torrent( torrents_list, torrent ); | 108 | vector_remove_torrent(torrents_list, torrent); |
102 | mutex_bucket_unlock_by_hash( *ws->hash, 0 ); | 109 | mutex_bucket_unlock_by_hash(*ws->hash, 0); |
103 | return 0; | 110 | return 0; |
104 | } | 111 | } |
105 | 112 | ||
106 | byte_zero( torrent->peer_list, sizeof( ot_peerlist ) ); | 113 | byte_zero(torrent->peer_list6, sizeof(ot_peerlist)); |
114 | byte_zero(torrent->peer_list4, sizeof(ot_peerlist)); | ||
107 | delta_torrentcount = 1; | 115 | delta_torrentcount = 1; |
108 | } else | 116 | } else |
109 | clean_single_torrent( torrent ); | 117 | clean_single_torrent(torrent); |
110 | 118 | ||
111 | torrent->peer_list->base = g_now_minutes; | 119 | torrent->peer_list6->base = g_now_minutes; |
120 | torrent->peer_list4->base = g_now_minutes; | ||
121 | |||
122 | peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; | ||
112 | 123 | ||
113 | /* Check for peer in torrent */ | 124 | /* Check for peer in torrent */ |
114 | peer_dest = vector_find_or_insert_peer( &(torrent->peer_list->peers), &ws->peer, &exactmatch ); | 125 | peer_dest = vector_find_or_insert_peer(&(peer_list->peers), peer_src, peer_size, &exactmatch); |
115 | if( !peer_dest ) { | 126 | if (!peer_dest) { |
116 | mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount ); | 127 | mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount); |
117 | return 0; | 128 | return 0; |
118 | } | 129 | } |
119 | 130 | ||
120 | /* Tell peer that it's fresh */ | 131 | /* Tell peer that it's fresh */ |
121 | OT_PEERTIME( &ws->peer ) = 0; | 132 | OT_PEERTIME(ws->peer, OT_PEER_SIZE6) = 0; |
122 | 133 | ||
123 | /* Sanitize flags: Whoever claims to have completed download, must be a seeder */ | 134 | /* Sanitize flags: Whoever claims to have completed download, must be a seeder */ |
124 | if( ( OT_PEERFLAG( &ws->peer ) & ( PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING ) ) == PEER_FLAG_COMPLETED ) | 135 | if ((OT_PEERFLAG(ws->peer) & (PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING)) == PEER_FLAG_COMPLETED) |
125 | OT_PEERFLAG( &ws->peer ) ^= PEER_FLAG_COMPLETED; | 136 | OT_PEERFLAG(ws->peer) ^= PEER_FLAG_COMPLETED; |
126 | 137 | ||
127 | /* If we hadn't had a match create peer there */ | 138 | /* If we hadn't had a match create peer there */ |
128 | if( !exactmatch ) { | 139 | if (!exactmatch) { |
129 | 140 | ||
130 | #ifdef WANT_SYNC_LIVE | 141 | #ifdef WANT_SYNC_LIVE |
131 | if( proto == FLAG_MCA ) | 142 | if (proto == FLAG_MCA) |
132 | OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_FROM_SYNC; | 143 | OT_PEERFLAG(ws->peer) |= PEER_FLAG_FROM_SYNC; |
133 | else | 144 | else |
134 | livesync_tell( ws ); | 145 | livesync_tell(ws); |
135 | #endif | 146 | #endif |
136 | 147 | ||
137 | torrent->peer_list->peer_count++; | 148 | peer_list->peer_count++; |
138 | if( OT_PEERFLAG(&ws->peer) & PEER_FLAG_COMPLETED ) { | 149 | if (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED) { |
139 | torrent->peer_list->down_count++; | 150 | peer_list->down_count++; |
140 | stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)ws ); | 151 | stats_issue_event(EVENT_COMPLETED, 0, (uintptr_t)ws); |
141 | } | 152 | } |
142 | if( OT_PEERFLAG(&ws->peer) & PEER_FLAG_SEEDING ) | 153 | if (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING) |
143 | torrent->peer_list->seed_count++; | 154 | peer_list->seed_count++; |
144 | 155 | ||
145 | } else { | 156 | } else { |
146 | stats_issue_event( EVENT_RENEW, 0, OT_PEERTIME( peer_dest ) ); | 157 | stats_issue_event(EVENT_RENEW, 0, OT_PEERTIME(peer_dest, peer_size)); |
147 | #ifdef WANT_SPOT_WOODPECKER | 158 | #ifdef WANT_SPOT_WOODPECKER |
148 | if( ( OT_PEERTIME(peer_dest) > 0 ) && ( OT_PEERTIME(peer_dest) < 20 ) ) | 159 | if ((OT_PEERTIME(peer_dest, peer_size) > 0) && (OT_PEERTIME(peer_dest, peer_size) < 20)) |
149 | stats_issue_event( EVENT_WOODPECKER, 0, (uintptr_t)&ws->peer ); | 160 | stats_issue_event(EVENT_WOODPECKER, 0, (uintptr_t)&ws->peer); |
150 | #endif | 161 | #endif |
151 | #ifdef WANT_SYNC_LIVE | 162 | #ifdef WANT_SYNC_LIVE |
152 | /* Won't live sync peers that come back too fast. Only exception: | 163 | /* Won't live sync peers that come back too fast. Only exception: |
153 | fresh "completed" reports */ | 164 | fresh "completed" reports */ |
154 | if( proto != FLAG_MCA ) { | 165 | if (proto != FLAG_MCA) { |
155 | if( OT_PEERTIME( peer_dest ) > OT_CLIENT_SYNC_RENEW_BOUNDARY || | 166 | if (OT_PEERTIME(peer_dest, peer_size) > OT_CLIENT_SYNC_RENEW_BOUNDARY || |
156 | ( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(&ws->peer) & PEER_FLAG_COMPLETED ) ) ) | 167 | (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED))) |
157 | livesync_tell( ws ); | 168 | livesync_tell(ws); |
158 | } | 169 | } |
159 | #endif | 170 | #endif |
160 | 171 | ||
161 | if( (OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && !(OT_PEERFLAG(&ws->peer) & PEER_FLAG_SEEDING ) ) | 172 | if ((OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING) && !(OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING)) |
162 | torrent->peer_list->seed_count--; | 173 | peer_list->seed_count--; |
163 | if( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && (OT_PEERFLAG(&ws->peer) & PEER_FLAG_SEEDING ) ) | 174 | if (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING)) |
164 | torrent->peer_list->seed_count++; | 175 | peer_list->seed_count++; |
165 | if( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(&ws->peer) & PEER_FLAG_COMPLETED ) ) { | 176 | if (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED)) { |
166 | torrent->peer_list->down_count++; | 177 | peer_list->down_count++; |
167 | stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)ws ); | 178 | stats_issue_event(EVENT_COMPLETED, 0, (uintptr_t)ws); |
168 | } | 179 | } |
169 | if( OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) | 180 | if (OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) |
170 | OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_COMPLETED; | 181 | OT_PEERFLAG(ws->peer) |= PEER_FLAG_COMPLETED; |
171 | } | 182 | } |
172 | 183 | ||
173 | memcpy( peer_dest, &ws->peer, sizeof(ot_peer) ); | 184 | memcpy(peer_dest, peer_src, peer_size); |
174 | #ifdef WANT_SYNC | 185 | #ifdef WANT_SYNC |
175 | if( proto == FLAG_MCA ) { | 186 | if (proto == FLAG_MCA) { |
176 | mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount ); | 187 | mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount); |
177 | return 0; | 188 | return 0; |
178 | } | 189 | } |
179 | #endif | 190 | #endif |
180 | 191 | ||
181 | ws->reply_size = return_peers_for_torrent( ws, torrent, amount, ws->reply, proto ); | 192 | ws->reply_size = return_peers_for_torrent(ws, torrent, amount, ws->reply, proto); |
182 | mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount ); | 193 | mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount); |
183 | return ws->reply_size; | 194 | return ws->reply_size; |
184 | } | 195 | } |
185 | 196 | ||
186 | static size_t return_peers_all( ot_peerlist *peer_list, char *reply ) { | 197 | static size_t return_peers_all(ot_peerlist *peer_list, size_t peer_size, char *reply) { |
187 | unsigned int bucket, num_buckets = 1; | 198 | unsigned int bucket, num_buckets = 1; |
188 | ot_vector * bucket_list = &peer_list->peers; | 199 | ot_vector *bucket_list = &peer_list->peers; |
189 | size_t result = OT_PEER_COMPARE_SIZE * peer_list->peer_count; | 200 | size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); |
190 | char * r_end = reply + result; | 201 | size_t result = compare_size * peer_list->peer_count; |
202 | char *r_end = reply + result; | ||
191 | 203 | ||
192 | if( OT_PEERLIST_HASBUCKETS(peer_list) ) { | 204 | if (OT_PEERLIST_HASBUCKETS(peer_list)) { |
193 | num_buckets = bucket_list->size; | 205 | num_buckets = bucket_list->size; |
194 | bucket_list = (ot_vector *)bucket_list->data; | 206 | bucket_list = (ot_vector *)bucket_list->data; |
195 | } | 207 | } |
196 | 208 | ||
197 | for( bucket = 0; bucket<num_buckets; ++bucket ) { | 209 | for (bucket = 0; bucket < num_buckets; ++bucket) { |
198 | ot_peer * peers = (ot_peer*)bucket_list[bucket].data; | 210 | ot_peer *peers = bucket_list[bucket].data; |
199 | size_t peer_count = bucket_list[bucket].size; | 211 | size_t peer_count = bucket_list[bucket].size; |
200 | while( peer_count-- ) { | 212 | while (peer_count--) { |
201 | if( OT_PEERFLAG(peers) & PEER_FLAG_SEEDING ) { | 213 | if (OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING) { |
202 | r_end-=OT_PEER_COMPARE_SIZE; | 214 | r_end -= compare_size; |
203 | memcpy(r_end,peers++,OT_PEER_COMPARE_SIZE); | 215 | memcpy(r_end, peers, compare_size); |
204 | } else { | 216 | } else { |
205 | memcpy(reply,peers++,OT_PEER_COMPARE_SIZE); | 217 | memcpy(reply, peers, compare_size); |
206 | reply+=OT_PEER_COMPARE_SIZE; | 218 | reply += compare_size; |
207 | } | 219 | } |
220 | peers += peer_size; | ||
208 | } | 221 | } |
209 | } | 222 | } |
210 | return result; | 223 | return result; |
211 | } | 224 | } |
212 | 225 | ||
213 | static size_t return_peers_selection( struct ot_workstruct *ws, ot_peerlist *peer_list, size_t amount, char *reply ) { | 226 | static size_t return_peers_selection(struct ot_workstruct *ws, ot_peerlist *peer_list, size_t peer_size, size_t amount, char *reply) { |
214 | unsigned int bucket_offset, bucket_index = 0, num_buckets = 1; | 227 | unsigned int bucket_offset, bucket_index = 0, num_buckets = 1; |
215 | ot_vector * bucket_list = &peer_list->peers; | 228 | ot_vector *bucket_list = &peer_list->peers; |
216 | unsigned int shifted_pc = peer_list->peer_count; | 229 | unsigned int shifted_pc = peer_list->peer_count; |
217 | unsigned int shifted_step = 0; | 230 | unsigned int shifted_step = 0; |
218 | unsigned int shift = 0; | 231 | unsigned int shift = 0; |
219 | size_t result = OT_PEER_COMPARE_SIZE * amount; | 232 | size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); |
220 | char * r_end = reply + result; | 233 | size_t result = compare_size * amount; |
234 | char *r_end = reply + result; | ||
221 | 235 | ||
222 | if( OT_PEERLIST_HASBUCKETS(peer_list) ) { | 236 | if (OT_PEERLIST_HASBUCKETS(peer_list)) { |
223 | num_buckets = bucket_list->size; | 237 | num_buckets = bucket_list->size; |
224 | bucket_list = (ot_vector *)bucket_list->data; | 238 | bucket_list = (ot_vector *)bucket_list->data; |
225 | } | 239 | } |
226 | 240 | ||
227 | /* Make fixpoint arithmetic as exact as possible */ | 241 | /* Make fixpoint arithmetic as exact as possible */ |
228 | #define MAXPRECBIT (1<<(8*sizeof(int)-3)) | 242 | #define MAXPRECBIT (1 << (8 * sizeof(int) - 3)) |
229 | while( !(shifted_pc & MAXPRECBIT ) ) { shifted_pc <<= 1; shift++; } | 243 | while (!(shifted_pc & MAXPRECBIT)) { |
230 | shifted_step = shifted_pc/amount; | 244 | shifted_pc <<= 1; |
245 | shift++; | ||
246 | } | ||
247 | shifted_step = shifted_pc / amount; | ||
231 | #undef MAXPRECBIT | 248 | #undef MAXPRECBIT |
232 | 249 | ||
233 | /* Initialize somewhere in the middle of peers so that | 250 | /* Initialize somewhere in the middle of peers so that |
234 | fixpoint's aliasing doesn't always miss the same peers */ | 251 | fixpoint's aliasing doesn't always miss the same peers */ |
235 | bucket_offset = nrand48(ws->rand48_state) % peer_list->peer_count; | 252 | bucket_offset = nrand48(ws->rand48_state) % peer_list->peer_count; |
236 | 253 | ||
237 | while( amount-- ) { | 254 | while (amount--) { |
238 | ot_peer * peer; | 255 | ot_peer *peer; |
239 | 256 | ||
240 | /* This is the aliased, non shifted range, next value may fall into */ | 257 | /* This is the aliased, non shifted range, next value may fall into */ |
241 | unsigned int diff = ( ( ( amount + 1 ) * shifted_step ) >> shift ) - | 258 | unsigned int diff = (((amount + 1) * shifted_step) >> shift) - ((amount * shifted_step) >> shift); |
242 | ( ( amount * shifted_step ) >> shift ); | 259 | bucket_offset += 1 + nrand48(ws->rand48_state) % diff; |
243 | bucket_offset += 1 + nrand48(ws->rand48_state) % diff; | ||
244 | 260 | ||
245 | while( bucket_offset >= bucket_list[bucket_index].size ) { | 261 | while (bucket_offset >= bucket_list[bucket_index].size) { |
246 | bucket_offset -= bucket_list[bucket_index].size; | 262 | bucket_offset -= bucket_list[bucket_index].size; |
247 | bucket_index = ( bucket_index + 1 ) % num_buckets; | 263 | bucket_index = (bucket_index + 1) % num_buckets; |
248 | } | 264 | } |
249 | peer = ((ot_peer*)bucket_list[bucket_index].data) + bucket_offset; | 265 | peer = bucket_list[bucket_index].data + peer_size * bucket_offset; |
250 | if( OT_PEERFLAG(peer) & PEER_FLAG_SEEDING ) { | 266 | if (OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_SEEDING) { |
251 | r_end-=OT_PEER_COMPARE_SIZE; | 267 | r_end -= compare_size; |
252 | memcpy(r_end,peer,OT_PEER_COMPARE_SIZE); | 268 | memcpy(r_end, peer, compare_size); |
253 | } else { | 269 | } else { |
254 | memcpy(reply,peer,OT_PEER_COMPARE_SIZE); | 270 | memcpy(reply, peer, compare_size); |
255 | reply+=OT_PEER_COMPARE_SIZE; | 271 | reply += compare_size; |
256 | } | 272 | } |
257 | } | 273 | } |
258 | return result; | 274 | return result; |
259 | } | 275 | } |
260 | 276 | ||
261 | /* Compiles a list of random peers for a torrent | 277 | static size_t return_peers_for_torrent_udp(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply) { |
262 | * reply must have enough space to hold 92+6*amount bytes | 278 | char *r = reply; |
263 | * does not yet check not to return self | 279 | size_t peer_size = peer_size_from_peer6(&ws->peer); |
264 | */ | 280 | ot_peerlist *peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; |
265 | size_t return_peers_for_torrent( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ) { | 281 | size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; |
266 | ot_peerlist *peer_list = torrent->peer_list; | 282 | size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; |
267 | char *r = reply; | 283 | |
268 | 284 | if (amount > peer_list->peer_count) | |
269 | if( amount > peer_list->peer_count ) | ||
270 | amount = peer_list->peer_count; | 285 | amount = peer_list->peer_count; |
271 | 286 | ||
272 | if( proto == FLAG_TCP ) { | 287 | *(uint32_t *)(r + 0) = htonl(OT_CLIENT_REQUEST_INTERVAL_RANDOM); |
273 | int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; | 288 | *(uint32_t *)(r + 4) = htonl(peer_count - seed_count); |
274 | r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie" PEERS_BENCODED "%zd:", peer_list->seed_count, peer_list->down_count, peer_list->peer_count-peer_list->seed_count, erval, erval/2, OT_PEER_COMPARE_SIZE*amount ); | 289 | *(uint32_t *)(r + 8) = htonl(seed_count); |
275 | } else { | 290 | r += 12; |
276 | *(uint32_t*)(r+0) = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM ); | 291 | |
277 | *(uint32_t*)(r+4) = htonl( peer_list->peer_count - peer_list->seed_count ); | 292 | if (amount) { |
278 | *(uint32_t*)(r+8) = htonl( peer_list->seed_count ); | 293 | if (amount == peer_list->peer_count) |
279 | r += 12; | 294 | r += return_peers_all(peer_list, peer_size, r); |
295 | else | ||
296 | r += return_peers_selection(ws, peer_list, peer_size, amount, r); | ||
297 | } | ||
298 | return r - reply; | ||
299 | } | ||
300 | |||
301 | static size_t return_peers_for_torrent_tcp(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply) { | ||
302 | char *r = reply; | ||
303 | int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; | ||
304 | size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; | ||
305 | size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count; | ||
306 | size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - seed_count; | ||
307 | |||
308 | /* Simple case: amount of peers in both lists is less than requested, here we return all results */ | ||
309 | size_t amount_v4 = torrent->peer_list4->peer_count; | ||
310 | size_t amount_v6 = torrent->peer_list6->peer_count; | ||
311 | |||
312 | /* Complex case: both lists have more than enough entries and we need to split between v4 and v6 clients */ | ||
313 | if (amount_v4 + amount_v6 > amount) { | ||
314 | size_t amount_left, percent_v6 = 0, percent_v4 = 0, left_v6, left_v4; | ||
315 | const size_t SCALE = 1024; | ||
316 | |||
317 | /* If possible, fill at least a quarter of peer from each family */ | ||
318 | if (amount / 4 <= amount_v4) | ||
319 | amount_v4 = amount / 4; | ||
320 | if (amount / 4 <= amount_v6) | ||
321 | amount_v6 = amount / 4; | ||
322 | |||
323 | /* Fill the rest according to which family's pool provides more peers */ | ||
324 | amount_left = amount - (amount_v4 + amount_v6); | ||
325 | |||
326 | left_v4 = torrent->peer_list4->peer_count - amount_v4; | ||
327 | left_v6 = torrent->peer_list6->peer_count - amount_v6; | ||
328 | |||
329 | if (left_v4 + left_v6) { | ||
330 | percent_v4 = (SCALE * left_v4) / (left_v4 + left_v6); | ||
331 | percent_v6 = (SCALE * left_v6) / (left_v4 + left_v6); | ||
332 | } | ||
333 | |||
334 | amount_v4 += (amount_left * percent_v4) / SCALE; | ||
335 | amount_v6 += (amount_left * percent_v6) / SCALE; | ||
336 | |||
337 | /* Integer division rounding can leave out a peer */ | ||
338 | if (amount_v4 + amount_v6 < amount && amount_v6 < torrent->peer_list6->peer_count) | ||
339 | ++amount_v6; | ||
340 | if (amount_v4 + amount_v6 < amount && amount_v4 < torrent->peer_list4->peer_count) | ||
341 | ++amount_v4; | ||
280 | } | 342 | } |
281 | 343 | ||
282 | if( amount ) { | 344 | r += |
283 | if( amount == peer_list->peer_count ) | 345 | sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie", seed_count, down_count, peer_count, erval, erval / 2); |
284 | r += return_peers_all( peer_list, r ); | 346 | |
347 | if (amount_v4) { | ||
348 | r += sprintf(r, PEERS_BENCODED4 "%zd:", OT_PEER_COMPARE_SIZE4 * amount_v4); | ||
349 | if (amount_v4 == torrent->peer_list4->peer_count) | ||
350 | r += return_peers_all(torrent->peer_list4, OT_PEER_SIZE4, r); | ||
351 | else | ||
352 | r += return_peers_selection(ws, torrent->peer_list4, OT_PEER_SIZE4, amount_v4, r); | ||
353 | } | ||
354 | |||
355 | if (amount_v6) { | ||
356 | r += sprintf(r, PEERS_BENCODED6 "%zd:", OT_PEER_COMPARE_SIZE6 * amount_v6); | ||
357 | if (amount_v6 == torrent->peer_list6->peer_count) | ||
358 | r += return_peers_all(torrent->peer_list6, OT_PEER_SIZE6, r); | ||
285 | else | 359 | else |
286 | r += return_peers_selection( ws, peer_list, amount, r ); | 360 | r += return_peers_selection(ws, torrent->peer_list6, OT_PEER_SIZE6, amount_v6, r); |
287 | } | 361 | } |
288 | 362 | ||
289 | if( proto == FLAG_TCP ) | 363 | *r++ = 'e'; |
290 | *r++ = 'e'; | ||
291 | 364 | ||
292 | return r - reply; | 365 | return r - reply; |
293 | } | 366 | } |
294 | 367 | ||
368 | /* Compiles a list of random peers for a torrent | ||
369 | * Reply must have enough space to hold: | ||
370 | * 92 + 6 * amount bytes for TCP/IPv4 | ||
371 | * 92 + 18 * amount bytes for TCP/IPv6 | ||
372 | * 12 + 6 * amount bytes for UDP/IPv4 | ||
373 | * 12 + 18 * amount bytes for UDP/IPv6 | ||
374 | * Does not yet check not to return self | ||
375 | */ | ||
376 | size_t return_peers_for_torrent(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto) { | ||
377 | return proto == FLAG_TCP ? return_peers_for_torrent_tcp(ws, torrent, amount, reply) : return_peers_for_torrent_udp(ws, torrent, amount, reply); | ||
378 | } | ||
379 | |||
295 | /* Fetches scrape info for a specific torrent */ | 380 | /* Fetches scrape info for a specific torrent */ |
296 | size_t return_udp_scrape_for_torrent( ot_hash hash, char *reply ) { | 381 | size_t return_udp_scrape_for_torrent(ot_hash const hash, char *reply) { |
297 | int exactmatch, delta_torrentcount = 0; | 382 | int exactmatch, delta_torrentcount = 0; |
298 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); | 383 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash); |
299 | ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 384 | ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch); |
300 | 385 | ||
301 | if( !exactmatch ) { | 386 | if (!exactmatch) { |
302 | memset( reply, 0, 12); | 387 | memset(reply, 0, 12); |
303 | } else { | 388 | } else { |
304 | uint32_t *r = (uint32_t*) reply; | 389 | uint32_t *r = (uint32_t *)reply; |
305 | 390 | ||
306 | if( clean_single_torrent( torrent ) ) { | 391 | if (clean_single_torrent(torrent)) { |
307 | vector_remove_torrent( torrents_list, torrent ); | 392 | vector_remove_torrent(torrents_list, torrent); |
308 | memset( reply, 0, 12); | 393 | memset(reply, 0, 12); |
309 | delta_torrentcount = -1; | 394 | delta_torrentcount = -1; |
310 | } else { | 395 | } else { |
311 | r[0] = htonl( torrent->peer_list->seed_count ); | 396 | r[0] = htonl(torrent->peer_list6->seed_count + torrent->peer_list4->seed_count); |
312 | r[1] = htonl( torrent->peer_list->down_count ); | 397 | r[1] = htonl(torrent->peer_list6->down_count + torrent->peer_list4->down_count); |
313 | r[2] = htonl( torrent->peer_list->peer_count-torrent->peer_list->seed_count ); | 398 | r[2] = htonl(torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - torrent->peer_list6->seed_count - torrent->peer_list4->seed_count); |
314 | } | 399 | } |
315 | } | 400 | } |
316 | mutex_bucket_unlock_by_hash( hash, delta_torrentcount ); | 401 | mutex_bucket_unlock_by_hash(hash, delta_torrentcount); |
317 | return 12; | 402 | return 12; |
318 | } | 403 | } |
319 | 404 | ||
320 | /* Fetches scrape info for a specific torrent */ | 405 | /* Fetches scrape info for a specific torrent */ |
321 | size_t return_tcp_scrape_for_torrent( ot_hash *hash_list, int amount, char *reply ) { | 406 | size_t return_tcp_scrape_for_torrent(ot_hash const *hash_list, int amount, char *reply) { |
322 | char *r = reply; | 407 | char *r = reply; |
323 | int exactmatch, i; | 408 | int exactmatch, i; |
324 | 409 | ||
325 | r += sprintf( r, "d5:filesd" ); | 410 | r += sprintf(r, "d5:filesd"); |
326 | 411 | ||
327 | for( i=0; i<amount; ++i ) { | 412 | for (i = 0; i < amount; ++i) { |
328 | int delta_torrentcount = 0; | 413 | int delta_torrentcount = 0; |
329 | ot_hash *hash = hash_list + i; | 414 | ot_hash const *hash = hash_list + i; |
330 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( *hash ); | 415 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(*hash); |
331 | ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 416 | ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch); |
332 | 417 | ||
333 | if( exactmatch ) { | 418 | if (exactmatch) { |
334 | if( clean_single_torrent( torrent ) ) { | 419 | if (clean_single_torrent(torrent)) { |
335 | vector_remove_torrent( torrents_list, torrent ); | 420 | vector_remove_torrent(torrents_list, torrent); |
336 | delta_torrentcount = -1; | 421 | delta_torrentcount = -1; |
337 | } else { | 422 | } else { |
338 | *r++='2';*r++='0';*r++=':'; | 423 | *r++ = '2'; |
339 | memcpy( r, hash, sizeof(ot_hash) ); r+=sizeof(ot_hash); | 424 | *r++ = '0'; |
340 | r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", | 425 | *r++ = ':'; |
341 | torrent->peer_list->seed_count, torrent->peer_list->down_count, torrent->peer_list->peer_count-torrent->peer_list->seed_count ); | 426 | memcpy(r, hash, sizeof(ot_hash)); |
427 | r += sizeof(ot_hash); | ||
428 | r += sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", torrent->peer_list6->seed_count + torrent->peer_list4->seed_count, | ||
429 | torrent->peer_list6->down_count + torrent->peer_list4->down_count, | ||
430 | torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - torrent->peer_list6->seed_count - torrent->peer_list4->seed_count); | ||
342 | } | 431 | } |
343 | } | 432 | } |
344 | mutex_bucket_unlock_by_hash( *hash, delta_torrentcount ); | 433 | mutex_bucket_unlock_by_hash(*hash, delta_torrentcount); |
345 | } | 434 | } |
346 | 435 | ||
347 | *r++ = 'e'; *r++ = 'e'; | 436 | *r++ = 'e'; |
437 | *r++ = 'e'; | ||
348 | return r - reply; | 438 | return r - reply; |
349 | } | 439 | } |
350 | 440 | ||
351 | static ot_peerlist dummy_list; | 441 | static ot_peerlist dummy_list; |
352 | size_t remove_peer_from_torrent( PROTO_FLAG proto, struct ot_workstruct *ws ) { | 442 | size_t remove_peer_from_torrent(PROTO_FLAG proto, struct ot_workstruct *ws) { |
353 | int exactmatch; | 443 | int exactmatch; |
354 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( *ws->hash ); | 444 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(*ws->hash); |
355 | ot_torrent *torrent = binary_search( ws->hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 445 | ot_torrent *torrent = binary_search(ws->hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch); |
356 | ot_peerlist *peer_list = &dummy_list; | 446 | ot_peerlist *peer_list = &dummy_list; |
447 | size_t peer_size; /* initialized in next line */ | ||
448 | ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size); | ||
449 | size_t peer_count = 0, seed_count = 0; | ||
357 | 450 | ||
358 | #ifdef WANT_SYNC_LIVE | 451 | #ifdef WANT_SYNC_LIVE |
359 | if( proto != FLAG_MCA ) { | 452 | if (proto != FLAG_MCA) { |
360 | OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_STOPPED; | 453 | OT_PEERFLAG(ws->peer) |= PEER_FLAG_STOPPED; |
361 | livesync_tell( ws ); | 454 | livesync_tell(ws); |
362 | } | 455 | } |
363 | #endif | 456 | #endif |
364 | 457 | ||
365 | if( exactmatch ) { | 458 | if (exactmatch) { |
366 | peer_list = torrent->peer_list; | 459 | peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; |
367 | switch( vector_remove_peer( &peer_list->peers, &ws->peer ) ) { | 460 | switch (vector_remove_peer(&peer_list->peers, peer_src, peer_size)) { |
368 | case 2: peer_list->seed_count--; /* Intentional fallthrough */ | 461 | case 2: |
369 | case 1: peer_list->peer_count--; /* Intentional fallthrough */ | 462 | peer_list->seed_count--; /* Intentional fallthrough */ |
370 | default: break; | 463 | case 1: |
464 | peer_list->peer_count--; /* Intentional fallthrough */ | ||
465 | default: | ||
466 | break; | ||
371 | } | 467 | } |
468 | |||
469 | peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; | ||
470 | seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; | ||
372 | } | 471 | } |
373 | 472 | ||
374 | if( proto == FLAG_TCP ) { | 473 | if (proto == FLAG_TCP) { |
375 | int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; | 474 | int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; |
376 | ws->reply_size = sprintf( ws->reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie" PEERS_BENCODED "0:e", peer_list->seed_count, peer_list->peer_count - peer_list->seed_count, erval, erval / 2 ); | 475 | ws->reply_size = sprintf(ws->reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie%s0:e", seed_count, peer_count - seed_count, erval, |
476 | erval / 2, peer_size == OT_PEER_SIZE6 ? PEERS_BENCODED6 : PEERS_BENCODED4); | ||
377 | } | 477 | } |
378 | 478 | ||
379 | /* Handle UDP reply */ | 479 | /* Handle UDP reply */ |
380 | if( proto == FLAG_UDP ) { | 480 | if (proto == FLAG_UDP) { |
381 | ((uint32_t*)ws->reply)[2] = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM ); | 481 | ((uint32_t *)ws->reply)[2] = htonl(OT_CLIENT_REQUEST_INTERVAL_RANDOM); |
382 | ((uint32_t*)ws->reply)[3] = htonl( peer_list->peer_count - peer_list->seed_count ); | 482 | ((uint32_t *)ws->reply)[3] = htonl(peer_count - seed_count); |
383 | ((uint32_t*)ws->reply)[4] = htonl( peer_list->seed_count); | 483 | ((uint32_t *)ws->reply)[4] = htonl(seed_count); |
384 | ws->reply_size = 20; | 484 | ws->reply_size = 20; |
385 | } | 485 | } |
386 | 486 | ||
387 | mutex_bucket_unlock_by_hash( *ws->hash, 0 ); | 487 | mutex_bucket_unlock_by_hash(*ws->hash, 0); |
388 | return ws->reply_size; | 488 | return ws->reply_size; |
389 | } | 489 | } |
390 | 490 | ||
391 | void iterate_all_torrents( int (*for_each)( ot_torrent* torrent, uintptr_t data ), uintptr_t data ) { | 491 | void iterate_all_torrents(int (*for_each)(ot_torrent *torrent, uintptr_t data), uintptr_t data) { |
392 | int bucket; | 492 | int bucket; |
393 | size_t j; | 493 | size_t j; |
394 | 494 | ||
395 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 495 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
396 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 496 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
397 | ot_torrent *torrents = (ot_torrent*)(torrents_list->data); | 497 | ot_torrent *torrents = (ot_torrent *)(torrents_list->data); |
398 | 498 | ||
399 | for( j=0; j<torrents_list->size; ++j ) | 499 | for (j = 0; j < torrents_list->size; ++j) |
400 | if( for_each( torrents + j, data ) ) | 500 | if (for_each(torrents + j, data)) |
401 | break; | 501 | break; |
402 | 502 | ||
403 | mutex_bucket_unlock( bucket, 0 ); | 503 | mutex_bucket_unlock(bucket, 0); |
404 | if( !g_opentracker_running ) return; | 504 | if (!g_opentracker_running) |
505 | return; | ||
506 | } | ||
507 | } | ||
508 | |||
509 | ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size) { | ||
510 | ot_ip6 *ip = (ot_ip6 *)peer; | ||
511 | if (!ip6_isv4mapped(ip)) { | ||
512 | *peer_size = OT_PEER_SIZE6; | ||
513 | return (ot_peer *)peer; | ||
514 | } | ||
515 | *peer_size = OT_PEER_SIZE4; | ||
516 | return (ot_peer *)(((uint8_t *)peer) + 12); | ||
517 | } | ||
518 | |||
519 | size_t peer_size_from_peer6(ot_peer6 *peer) { | ||
520 | ot_ip6 *ip = (ot_ip6 *)peer; | ||
521 | if (!ip6_isv4mapped(ip)) | ||
522 | return OT_PEER_SIZE6; | ||
523 | return OT_PEER_SIZE4; | ||
524 | } | ||
525 | |||
526 | #ifdef _DEBUG_RANDOMTORRENTS | ||
527 | void trackerlogic_add_random_torrents(size_t amount) { | ||
528 | struct ot_workstruct ws; | ||
529 | memset(&ws, 0, sizeof(ws)); | ||
530 | |||
531 | ws.inbuf = malloc(G_INBUF_SIZE); | ||
532 | ws.outbuf = malloc(G_OUTBUF_SIZE); | ||
533 | ws.reply = ws.outbuf; | ||
534 | ws.hash = (ot_hash *)ws.inbuf; | ||
535 | |||
536 | while (amount--) { | ||
537 | arc4random_buf(ws.hash, sizeof(ot_hash)); | ||
538 | arc4random_buf(&ws.peer, sizeof(ws.peer)); | ||
539 | |||
540 | OT_PEERFLAG(ws.peer) &= PEER_FLAG_SEEDING | PEER_FLAG_COMPLETED | PEER_FLAG_STOPPED; | ||
541 | |||
542 | add_peer_to_torrent_and_return_peers(FLAG_TCP, &ws, 1); | ||
405 | } | 543 | } |
544 | |||
545 | free(ws.inbuf); | ||
546 | free(ws.outbuf); | ||
406 | } | 547 | } |
548 | #endif | ||
407 | 549 | ||
408 | void exerr( char * message ) { | 550 | void exerr(char *message) { |
409 | fprintf( stderr, "%s\n", message ); | 551 | fprintf(stderr, "%s\n", message); |
410 | exit( 111 ); | 552 | exit(111); |
411 | } | 553 | } |
412 | 554 | ||
413 | void trackerlogic_init( ) { | 555 | void trackerlogic_init() { |
414 | g_tracker_id = random(); | 556 | g_tracker_id = random(); |
415 | 557 | ||
416 | if( !g_stats_path ) | 558 | if (!g_stats_path) |
417 | g_stats_path = "stats"; | 559 | g_stats_path = "stats"; |
418 | g_stats_path_len = strlen( g_stats_path ); | 560 | g_stats_path_len = strlen(g_stats_path); |
419 | 561 | ||
420 | /* Initialise background worker threads */ | 562 | /* Initialise background worker threads */ |
421 | mutex_init( ); | 563 | mutex_init(); |
422 | clean_init( ); | 564 | clean_init(); |
423 | fullscrape_init( ); | 565 | fullscrape_init(); |
424 | accesslist_init( ); | 566 | accesslist_init(); |
425 | livesync_init( ); | 567 | livesync_init(); |
426 | stats_init( ); | 568 | stats_init(); |
427 | } | 569 | } |
428 | 570 | ||
429 | void trackerlogic_deinit( void ) { | 571 | void trackerlogic_deinit(void) { |
430 | int bucket, delta_torrentcount = 0; | 572 | int bucket, delta_torrentcount = 0; |
431 | size_t j; | 573 | size_t j; |
432 | 574 | ||
433 | /* Free all torrents... */ | 575 | /* Free all torrents... */ |
434 | for(bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 576 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
435 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 577 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
436 | if( torrents_list->size ) { | 578 | if (torrents_list->size) { |
437 | for( j=0; j<torrents_list->size; ++j ) { | 579 | for (j = 0; j < torrents_list->size; ++j) { |
438 | ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + j; | 580 | ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + j; |
439 | free_peerlist( torrent->peer_list ); | 581 | free_peerlist(torrent->peer_list6); |
582 | free_peerlist(torrent->peer_list4); | ||
440 | delta_torrentcount -= 1; | 583 | delta_torrentcount -= 1; |
441 | } | 584 | } |
442 | free( torrents_list->data ); | 585 | free(torrents_list->data); |
443 | } | 586 | } |
444 | mutex_bucket_unlock( bucket, delta_torrentcount ); | 587 | mutex_bucket_unlock(bucket, delta_torrentcount); |
445 | } | 588 | } |
446 | 589 | ||
447 | /* Deinitialise background worker threads */ | 590 | /* Deinitialise background worker threads */ |
448 | stats_deinit( ); | 591 | stats_deinit(); |
449 | livesync_deinit( ); | 592 | livesync_deinit(); |
450 | accesslist_deinit( ); | 593 | accesslist_deinit(); |
451 | fullscrape_deinit( ); | 594 | fullscrape_deinit(); |
452 | clean_deinit( ); | 595 | clean_deinit(); |
453 | /* Release mutexes */ | 596 | /* Release mutexes */ |
454 | mutex_deinit( ); | 597 | mutex_deinit(); |
455 | } | 598 | } |
456 | |||
457 | const char *g_version_trackerlogic_c = "$Source$: $Revision$\n"; | ||
diff --git a/trackerlogic.h b/trackerlogic.h index 87b9138..022184d 100644 --- a/trackerlogic.h +++ b/trackerlogic.h | |||
@@ -6,11 +6,11 @@ | |||
6 | #ifndef OT_TRACKERLOGIC_H__ | 6 | #ifndef OT_TRACKERLOGIC_H__ |
7 | #define OT_TRACKERLOGIC_H__ | 7 | #define OT_TRACKERLOGIC_H__ |
8 | 8 | ||
9 | #include <sys/types.h> | ||
10 | #include <sys/time.h> | ||
11 | #include <time.h> | ||
12 | #include <stdint.h> | 9 | #include <stdint.h> |
13 | #include <stdlib.h> | 10 | #include <stdlib.h> |
11 | #include <sys/time.h> | ||
12 | #include <sys/types.h> | ||
13 | #include <time.h> | ||
14 | 14 | ||
15 | #if defined(__linux__) && defined(WANT_ARC4RANDOM) | 15 | #if defined(__linux__) && defined(WANT_ARC4RANDOM) |
16 | #include <bsd/stdlib.h> | 16 | #include <bsd/stdlib.h> |
@@ -22,118 +22,139 @@ | |||
22 | typedef uint8_t ot_hash[20]; | 22 | typedef uint8_t ot_hash[20]; |
23 | typedef time_t ot_time; | 23 | typedef time_t ot_time; |
24 | typedef char ot_ip6[16]; | 24 | typedef char ot_ip6[16]; |
25 | typedef struct { ot_ip6 address; int bits; } | 25 | typedef struct { |
26 | ot_net; | 26 | ot_ip6 address; |
27 | #ifdef WANT_V6 | 27 | int bits; |
28 | #define OT_IP_SIZE 16 | 28 | } ot_net; |
29 | #define PEERS_BENCODED "6:peers6" | 29 | /* List of peers should fit in a single UDP packet (around 1200 bytes) */ |
30 | #else | 30 | #define OT_MAX_PEERS_UDP6 66 |
31 | #define OT_IP_SIZE 4 | 31 | #define OT_MAX_PEERS_UDP4 200 |
32 | #define PEERS_BENCODED "5:peers" | 32 | |
33 | #endif | 33 | #define OT_IP_SIZE6 16 |
34 | #define OT_IP_SIZE4 4 | ||
35 | #define OT_PORT_SIZE 2 | ||
36 | #define OT_FLAG_SIZE 1 | ||
37 | #define OT_TIME_SIZE 1 | ||
34 | 38 | ||
35 | /* Some tracker behaviour tunable */ | 39 | /* Some tracker behaviour tunable */ |
36 | #define OT_CLIENT_TIMEOUT 30 | 40 | #define OT_CLIENT_TIMEOUT 30 |
37 | #define OT_CLIENT_TIMEOUT_CHECKINTERVAL 10 | 41 | #define OT_CLIENT_TIMEOUT_CHECKINTERVAL 10 |
38 | #define OT_CLIENT_TIMEOUT_SEND (60*15) | 42 | #define OT_CLIENT_TIMEOUT_SEND (60 * 15) |
39 | #define OT_CLIENT_REQUEST_INTERVAL (60*30) | 43 | #define OT_CLIENT_REQUEST_INTERVAL (60 * 30) |
40 | #define OT_CLIENT_REQUEST_VARIATION (60*6) | 44 | #define OT_CLIENT_REQUEST_VARIATION (60 * 6) |
41 | 45 | ||
42 | #define OT_TORRENT_TIMEOUT_HOURS 24 | 46 | #define OT_TORRENT_TIMEOUT_HOURS 24 |
43 | #define OT_TORRENT_TIMEOUT (60*OT_TORRENT_TIMEOUT_HOURS) | 47 | #define OT_TORRENT_TIMEOUT (60 * OT_TORRENT_TIMEOUT_HOURS) |
44 | 48 | ||
45 | #define OT_CLIENT_REQUEST_INTERVAL_RANDOM ( OT_CLIENT_REQUEST_INTERVAL - OT_CLIENT_REQUEST_VARIATION/2 + (int)( nrand48(ws->rand48_state) % OT_CLIENT_REQUEST_VARIATION ) ) | 49 | #define OT_CLIENT_REQUEST_INTERVAL_RANDOM \ |
50 | (OT_CLIENT_REQUEST_INTERVAL - OT_CLIENT_REQUEST_VARIATION / 2 + (int)(nrand48(ws->rand48_state) % OT_CLIENT_REQUEST_VARIATION)) | ||
46 | 51 | ||
47 | /* If WANT_MODEST_FULLSCRAPES is on, ip addresses may not | 52 | /* If WANT_MODEST_FULLSCRAPES is on, ip addresses may not |
48 | fullscrape more frequently than this amount in seconds */ | 53 | fullscrape more frequently than this amount in seconds */ |
49 | #define OT_MODEST_PEER_TIMEOUT (60*5) | 54 | #define OT_MODEST_PEER_TIMEOUT (60 * 5) |
50 | 55 | ||
51 | /* If peers come back before 10 minutes, don't live sync them */ | 56 | /* If peers come back before 10 minutes, don't live sync them */ |
52 | #define OT_CLIENT_SYNC_RENEW_BOUNDARY 10 | 57 | #define OT_CLIENT_SYNC_RENEW_BOUNDARY 10 |
53 | 58 | ||
54 | /* Number of tracker admin ip addresses allowed */ | 59 | /* Number of tracker admin ip addresses allowed */ |
55 | #define OT_ADMINIP_MAX 64 | 60 | #define OT_ADMINIP_MAX 64 |
56 | #define OT_MAX_THREADS 64 | 61 | #define OT_MAX_THREADS 64 |
57 | 62 | ||
58 | #define OT_PEER_TIMEOUT 45 | 63 | /* Number of minutes after announce before peer is removed */ |
64 | #define OT_PEER_TIMEOUT 45 | ||
59 | 65 | ||
60 | /* We maintain a list of 1024 pointers to sorted list of ot_torrent structs | 66 | /* We maintain a list of 1024 pointers to sorted list of ot_torrent structs |
61 | Sort key is, of course, its hash */ | 67 | Sort key is, of course, its hash */ |
62 | #define OT_BUCKET_COUNT_BITS 10 | 68 | #define OT_BUCKET_COUNT_BITS 10 |
63 | 69 | ||
64 | #define OT_BUCKET_COUNT (1<<OT_BUCKET_COUNT_BITS) | 70 | #define OT_BUCKET_COUNT (1 << OT_BUCKET_COUNT_BITS) |
65 | #define OT_BUCKET_COUNT_SHIFT (32-OT_BUCKET_COUNT_BITS) | 71 | #define OT_BUCKET_COUNT_SHIFT (32 - OT_BUCKET_COUNT_BITS) |
72 | |||
73 | /* if _DEBUG_RANDOMTORRENTS is set, this is the amount of torrents to create | ||
74 | on startup */ | ||
75 | #define RANDOMTORRENTS (1024 * 1024 * 1) | ||
66 | 76 | ||
67 | /* From opentracker.c */ | 77 | /* From opentracker.c */ |
68 | extern time_t g_now_seconds; | 78 | extern time_t g_now_seconds; |
69 | extern volatile int g_opentracker_running; | 79 | extern volatile int g_opentracker_running; |
70 | #define g_now_minutes (g_now_seconds/60) | 80 | #define g_now_minutes (g_now_seconds / 60) |
71 | 81 | ||
72 | extern uint32_t g_tracker_id; | 82 | extern uint32_t g_tracker_id; |
73 | typedef enum { FLAG_TCP, FLAG_UDP, FLAG_MCA, FLAG_SELFPIPE } PROTO_FLAG; | 83 | typedef enum { FLAG_TCP, FLAG_UDP, FLAG_MCA, FLAG_SELFPIPE } PROTO_FLAG; |
74 | 84 | ||
75 | typedef struct { | 85 | #define OT_PEER_COMPARE_SIZE6 ((OT_IP_SIZE6) + (OT_PORT_SIZE)) |
76 | uint8_t data[OT_IP_SIZE+2+2]; | 86 | #define OT_PEER_COMPARE_SIZE4 ((OT_IP_SIZE4) + (OT_PORT_SIZE)) |
77 | } ot_peer; | 87 | #define OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(PEER_SIZE) ((PEER_SIZE) - (OT_TIME_SIZE) - (OT_FLAG_SIZE)) |
88 | |||
89 | #define OT_PEER_SIZE6 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE6)) | ||
90 | #define OT_PEER_SIZE4 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE4)) | ||
91 | |||
92 | typedef uint8_t ot_peer; /* Generic pointer to a v6 or v4 peer */ | ||
93 | typedef uint8_t ot_peer6[OT_PEER_SIZE6]; | ||
94 | typedef uint8_t ot_peer4[OT_PEER_SIZE4]; | ||
78 | static const uint8_t PEER_FLAG_SEEDING = 0x80; | 95 | static const uint8_t PEER_FLAG_SEEDING = 0x80; |
79 | static const uint8_t PEER_FLAG_COMPLETED = 0x40; | 96 | static const uint8_t PEER_FLAG_COMPLETED = 0x40; |
80 | static const uint8_t PEER_FLAG_STOPPED = 0x20; | 97 | static const uint8_t PEER_FLAG_STOPPED = 0x20; |
81 | static const uint8_t PEER_FLAG_FROM_SYNC = 0x10; | 98 | static const uint8_t PEER_FLAG_FROM_SYNC = 0x10; |
82 | static const uint8_t PEER_FLAG_LEECHING = 0x00; | 99 | static const uint8_t PEER_FLAG_LEECHING = 0x00; |
83 | 100 | ||
84 | #ifdef WANT_V6 | 101 | /* Takes an ot_peer6 and returns the proper pointer to the peer and sets peer_size */ |
85 | #define OT_SETIP(peer,ip) memcpy((peer),(ip),(OT_IP_SIZE)) | 102 | ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size); |
86 | #else | 103 | size_t peer_size_from_peer6(ot_peer6 *peer); |
87 | #define OT_SETIP(peer,ip) memcpy((peer),(((uint8_t*)ip)+12),(OT_IP_SIZE)) | 104 | |
88 | #endif | 105 | /* New style */ |
89 | #define OT_SETPORT(peer,port) memcpy(((uint8_t*)(peer))+(OT_IP_SIZE),(port),2) | 106 | #define OT_SETIP(peer, ip) memcpy((uint8_t *)(peer), (ip), OT_IP_SIZE6) |
90 | #define OT_PEERFLAG(peer) (((uint8_t*)(peer))[(OT_IP_SIZE)+2]) | 107 | #define OT_SETPORT(peer, port) memcpy(((uint8_t *)(peer)) + (OT_IP_SIZE6), (port), 2) |
91 | #define OT_PEERTIME(peer) (((uint8_t*)(peer))[(OT_IP_SIZE)+3]) | 108 | #define OT_PEERFLAG(peer) (((uint8_t *)(peer))[(OT_IP_SIZE6) + 2]) |
109 | #define OT_PEERFLAG_D(peer, peersize) (((uint8_t *)(peer))[(peersize) - 2]) | ||
110 | #define OT_PEERTIME(peer, peersize) (((uint8_t *)(peer))[(peersize) - 1]) | ||
111 | |||
112 | #define PEERS_BENCODED6 "6:peers6" | ||
113 | #define PEERS_BENCODED4 "5:peers" | ||
92 | 114 | ||
93 | #define OT_HASH_COMPARE_SIZE (sizeof(ot_hash)) | 115 | #define OT_HASH_COMPARE_SIZE (sizeof(ot_hash)) |
94 | #define OT_PEER_COMPARE_SIZE ((OT_IP_SIZE)+2) | ||
95 | 116 | ||
96 | struct ot_peerlist; | 117 | struct ot_peerlist; |
97 | typedef struct ot_peerlist ot_peerlist; | 118 | typedef struct ot_peerlist ot_peerlist; |
98 | typedef struct { | 119 | typedef struct { |
99 | ot_hash hash; | 120 | ot_hash hash; |
100 | ot_peerlist *peer_list; | 121 | ot_peerlist *peer_list6; |
122 | ot_peerlist *peer_list4; | ||
101 | } ot_torrent; | 123 | } ot_torrent; |
102 | 124 | ||
103 | #include "ot_vector.h" | 125 | #include "ot_vector.h" |
104 | 126 | ||
105 | struct ot_peerlist { | 127 | struct ot_peerlist { |
106 | ot_time base; | 128 | ot_time base; |
107 | size_t seed_count; | 129 | size_t seed_count; |
108 | size_t peer_count; | 130 | size_t peer_count; |
109 | size_t down_count; | 131 | size_t down_count; |
110 | /* normal peers vector or | 132 | /* normal peers vector or |
111 | pointer to ot_vector[32] buckets if data != NULL and space == 0 | 133 | pointer to ot_vector[32] buckets if data != NULL and space == 0 |
112 | */ | 134 | */ |
113 | ot_vector peers; | 135 | ot_vector peers; |
114 | }; | 136 | }; |
115 | #define OT_PEERLIST_HASBUCKETS(peer_list) ((peer_list)->peers.size > (peer_list)->peers.space) | 137 | #define OT_PEERLIST_HASBUCKETS(peer_list) ((peer_list)->peers.size > (peer_list)->peers.space) |
116 | 138 | ||
117 | struct ot_workstruct { | 139 | struct ot_workstruct { |
118 | /* Thread specific, static */ | 140 | /* Thread specific, static */ |
119 | char *inbuf; | 141 | char *inbuf; |
120 | #define G_INBUF_SIZE 8192 | 142 | #define G_INBUF_SIZE 8192 |
121 | char *outbuf; | 143 | char *outbuf; |
122 | #define G_OUTBUF_SIZE 8192 | 144 | #define G_OUTBUF_SIZE 8192 |
123 | #ifdef _DEBUG_HTTPERROR | 145 | #ifdef _DEBUG_HTTPERROR |
124 | char *debugbuf; | 146 | char *debugbuf; |
125 | #define G_DEBUGBUF_SIZE 8192 | 147 | #define G_DEBUGBUF_SIZE 8192 |
126 | #endif | 148 | #endif |
127 | 149 | ||
128 | /* The peer currently in the working */ | 150 | /* The peer currently in the working */ |
129 | ot_peer peer; | 151 | ot_peer6 peer; /* Can fit v6 and v4 peers */ |
130 | 152 | ||
131 | /* Pointers into the request buffer */ | 153 | /* Pointers into the request buffer */ |
132 | ot_hash *hash; | 154 | ot_hash *hash; |
133 | char *peer_id; | 155 | char *peer_id; |
134 | 156 | ||
135 | /* HTTP specific, non static */ | 157 | /* HTTP specific, non static */ |
136 | int keep_alive; | ||
137 | char *request; | 158 | char *request; |
138 | ssize_t request_size; | 159 | ssize_t request_size; |
139 | ssize_t header_size; | 160 | ssize_t header_size; |
@@ -143,6 +164,8 @@ struct ot_workstruct { | |||
143 | /* Entropy state for rand48 function so that threads don't need to acquire mutexes for | 164 | /* Entropy state for rand48 function so that threads don't need to acquire mutexes for |
144 | global random() or arc4random() state, which causes heavy load on linuxes */ | 165 | global random() or arc4random() state, which causes heavy load on linuxes */ |
145 | uint16_t rand48_state[3]; | 166 | uint16_t rand48_state[3]; |
167 | |||
168 | int keep_alive; | ||
146 | }; | 169 | }; |
147 | 170 | ||
148 | /* | 171 | /* |
@@ -154,31 +177,34 @@ struct ot_workstruct { | |||
154 | #endif | 177 | #endif |
155 | 178 | ||
156 | #ifdef WANT_SYNC | 179 | #ifdef WANT_SYNC |
157 | #define WANT_SYNC_PARAM( param ) , param | 180 | #define WANT_SYNC_PARAM(param) , param |
158 | #else | 181 | #else |
159 | #define WANT_SYNC_PARAM( param ) | 182 | #define WANT_SYNC_PARAM(param) |
160 | #endif | 183 | #endif |
161 | 184 | ||
162 | #ifdef WANT_LOG_NETWORKS | 185 | #ifdef WANT_LOG_NETWORKS |
163 | #error Live logging networks disabled at the moment. | 186 | #error Live logging networks disabled at the moment. |
164 | #endif | 187 | #endif |
165 | 188 | ||
166 | void trackerlogic_init( ); | 189 | void trackerlogic_init(void); |
167 | void trackerlogic_deinit( void ); | 190 | void trackerlogic_deinit(void); |
168 | void exerr( char * message ); | 191 | void exerr(char *message); |
169 | 192 | ||
170 | /* add_peer_to_torrent does only release the torrent bucket if from_sync is set, | 193 | /* add_peer_to_torrent does only release the torrent bucket if from_sync is set, |
171 | otherwise it is released in return_peers_for_torrent */ | 194 | otherwise it is released in return_peers_for_torrent */ |
172 | size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount ); | 195 | size_t add_peer_to_torrent_and_return_peers(PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount); |
173 | size_t remove_peer_from_torrent( PROTO_FLAG proto, struct ot_workstruct *ws ); | 196 | size_t remove_peer_from_torrent(PROTO_FLAG proto, struct ot_workstruct *ws); |
174 | size_t return_tcp_scrape_for_torrent( ot_hash *hash, int amount, char *reply ); | 197 | size_t return_tcp_scrape_for_torrent(ot_hash const *hash_list, int amount, char *reply); |
175 | size_t return_udp_scrape_for_torrent( ot_hash hash, char *reply ); | 198 | size_t return_udp_scrape_for_torrent(ot_hash const hash, char *reply); |
176 | void add_torrent_from_saved_state( ot_hash hash, ot_time base, size_t down_count ); | 199 | void add_torrent_from_saved_state(ot_hash const hash, ot_time base, size_t down_count); |
200 | #ifdef _DEBUG_RANDOMTORRENTS | ||
201 | void trackerlogic_add_random_torrents(size_t amount); | ||
202 | #endif | ||
177 | 203 | ||
178 | /* torrent iterator */ | 204 | /* torrent iterator */ |
179 | void iterate_all_torrents( int (*for_each)( ot_torrent* torrent, uintptr_t data ), uintptr_t data ); | 205 | void iterate_all_torrents(int (*for_each)(ot_torrent *torrent, uintptr_t data), uintptr_t data); |
180 | 206 | ||
181 | /* Helper, before it moves to its own object */ | 207 | /* Helper, before it moves to its own object */ |
182 | void free_peerlist( ot_peerlist *peer_list ); | 208 | void free_peerlist(ot_peerlist *peer_list); |
183 | 209 | ||
184 | #endif | 210 | #endif |