 .clang-format            |  246
 .gitignore               |    2
 Makefile                 |   34
 Makefile.arc4random      |    3
 Makefile.gzip            |    4
 Makefile.zstd            |    3
 man1/opentracker.1       |  130
 man4/opentracker.conf.4  |   86
 opentracker.c            |  879
 opentracker.conf.sample  |   13
 ot_accesslist.c          |  318
 ot_accesslist.h          |   47
 ot_clean.c               |  135
 ot_clean.h               |   10
 ot_fullscrape.c          |  470
 ot_fullscrape.h          |    8
 ot_http.c                |  845
 ot_http.h                |   15
 ot_iovec.c               |   76
 ot_iovec.h               |   11
 ot_livesync.c            |  205
 ot_livesync.h            |   16
 ot_mutex.c               |  190
 ot_mutex.h               |  113
 ot_rijndael.c            |    2
 ot_stats.c               | 1004
 ot_stats.h               |   24
 ot_sync.c                |  118
 ot_sync.h                |    8
 ot_udp.c                 |  265
 ot_udp.h                 |    4
 ot_vector.c              |  247
 ot_vector.h              |   24
 proxy.c                  |  852
 scan_urlencoded_query.c  |   99
 scan_urlencoded_query.h  |    6
 tests/testsuite2.sh      |   24
 trackerlogic.c           |  635
 trackerlogic.h           |  165
 39 files changed, 4289 insertions(+), 3047 deletions(-)
diff --git a/.clang-format b/.clang-format
new file mode 100644
index 0000000..cf3c715
--- /dev/null
+++ b/.clang-format
| @@ -0,0 +1,246 @@ | |||
| 1 | --- | ||
| 2 | Language: Cpp | ||
| 3 | # BasedOnStyle: LLVM | ||
| 4 | AccessModifierOffset: -2 | ||
| 5 | AlignAfterOpenBracket: Align | ||
| 6 | AlignArrayOfStructures: None | ||
| 7 | AlignConsecutiveAssignments: | ||
| 8 | Enabled: true | ||
| 9 | AcrossEmptyLines: true | ||
| 10 | AcrossComments: true | ||
| 11 | AlignCompound: true | ||
| 12 | AlignFunctionPointers: false | ||
| 13 | PadOperators: true | ||
| 14 | AlignConsecutiveBitFields: | ||
| 15 | Enabled: false | ||
| 16 | AcrossEmptyLines: true | ||
| 17 | AcrossComments: true | ||
| 18 | AlignCompound: false | ||
| 19 | AlignFunctionPointers: false | ||
| 20 | PadOperators: false | ||
| 21 | AlignConsecutiveDeclarations: | ||
| 22 | Enabled: true | ||
| 23 | AcrossEmptyLines: true | ||
| 24 | AcrossComments: true | ||
| 25 | AlignCompound: true | ||
| 26 | AlignFunctionPointers: false | ||
| 27 | PadOperators: true | ||
| 28 | AlignConsecutiveMacros: | ||
| 29 | Enabled: true | ||
| 30 | AcrossEmptyLines: true | ||
| 31 | AcrossComments: true | ||
| 32 | AlignCompound: true | ||
| 33 | AlignFunctionPointers: false | ||
| 34 | PadOperators: false | ||
| 35 | AlignConsecutiveShortCaseStatements: | ||
| 36 | Enabled: true | ||
| 37 | AcrossEmptyLines: true | ||
| 38 | AcrossComments: true | ||
| 39 | AlignCaseColons: false | ||
| 40 | AlignEscapedNewlines: Right | ||
| 41 | AlignOperands: Align | ||
| 42 | AlignTrailingComments: | ||
| 43 | Kind: Always | ||
| 44 | OverEmptyLines: 0 | ||
| 45 | AllowAllArgumentsOnNextLine: true | ||
| 46 | AllowAllParametersOfDeclarationOnNextLine: true | ||
| 47 | AllowBreakBeforeNoexceptSpecifier: Never | ||
| 48 | AllowShortBlocksOnASingleLine: Never | ||
| 49 | AllowShortCaseLabelsOnASingleLine: false | ||
| 50 | AllowShortCompoundRequirementOnASingleLine: true | ||
| 51 | AllowShortEnumsOnASingleLine: true | ||
| 52 | AllowShortFunctionsOnASingleLine: All | ||
| 53 | AllowShortIfStatementsOnASingleLine: Never | ||
| 54 | AllowShortLambdasOnASingleLine: All | ||
| 55 | AllowShortLoopsOnASingleLine: false | ||
| 56 | AlwaysBreakAfterDefinitionReturnType: None | ||
| 57 | AlwaysBreakAfterReturnType: None | ||
| 58 | AlwaysBreakBeforeMultilineStrings: false | ||
| 59 | AlwaysBreakTemplateDeclarations: MultiLine | ||
| 60 | AttributeMacros: | ||
| 61 | - __capability | ||
| 62 | BinPackArguments: true | ||
| 63 | BinPackParameters: true | ||
| 64 | BitFieldColonSpacing: Both | ||
| 65 | BraceWrapping: | ||
| 66 | AfterCaseLabel: false | ||
| 67 | AfterClass: false | ||
| 68 | AfterControlStatement: Never | ||
| 69 | AfterEnum: false | ||
| 70 | AfterExternBlock: false | ||
| 71 | AfterFunction: false | ||
| 72 | AfterNamespace: false | ||
| 73 | AfterObjCDeclaration: false | ||
| 74 | AfterStruct: false | ||
| 75 | AfterUnion: false | ||
| 76 | BeforeCatch: false | ||
| 77 | BeforeElse: false | ||
| 78 | BeforeLambdaBody: false | ||
| 79 | BeforeWhile: false | ||
| 80 | IndentBraces: false | ||
| 81 | SplitEmptyFunction: true | ||
| 82 | SplitEmptyRecord: true | ||
| 83 | SplitEmptyNamespace: true | ||
| 84 | BreakAdjacentStringLiterals: true | ||
| 85 | BreakAfterAttributes: Leave | ||
| 86 | BreakAfterJavaFieldAnnotations: false | ||
| 87 | BreakArrays: true | ||
| 88 | BreakBeforeBinaryOperators: None | ||
| 89 | BreakBeforeConceptDeclarations: Always | ||
| 90 | BreakBeforeBraces: Attach | ||
| 91 | BreakBeforeInlineASMColon: OnlyMultiline | ||
| 92 | BreakBeforeTernaryOperators: true | ||
| 93 | BreakConstructorInitializers: BeforeColon | ||
| 94 | BreakInheritanceList: BeforeColon | ||
| 95 | BreakStringLiterals: false | ||
| 96 | ColumnLimit: 160 | ||
| 97 | CommentPragmas: '^ IWYU pragma:' | ||
| 98 | CompactNamespaces: false | ||
| 99 | ConstructorInitializerIndentWidth: 4 | ||
| 100 | ContinuationIndentWidth: 4 | ||
| 101 | Cpp11BracedListStyle: true | ||
| 102 | DerivePointerAlignment: false | ||
| 103 | DisableFormat: false | ||
| 104 | EmptyLineAfterAccessModifier: Never | ||
| 105 | EmptyLineBeforeAccessModifier: LogicalBlock | ||
| 106 | ExperimentalAutoDetectBinPacking: false | ||
| 107 | FixNamespaceComments: true | ||
| 108 | ForEachMacros: | ||
| 109 | - foreach | ||
| 110 | - Q_FOREACH | ||
| 111 | - BOOST_FOREACH | ||
| 112 | IfMacros: | ||
| 113 | - KJ_IF_MAYBE | ||
| 114 | IncludeBlocks: Preserve | ||
| 115 | IncludeCategories: | ||
| 116 | - Regex: '^"(llvm|llvm-c|clang|clang-c)/' | ||
| 117 | Priority: 2 | ||
| 118 | SortPriority: 0 | ||
| 119 | CaseSensitive: false | ||
| 120 | - Regex: '^(<|"(gtest|gmock|isl|json)/)' | ||
| 121 | Priority: 3 | ||
| 122 | SortPriority: 0 | ||
| 123 | CaseSensitive: false | ||
| 124 | - Regex: '.*' | ||
| 125 | Priority: 1 | ||
| 126 | SortPriority: 0 | ||
| 127 | CaseSensitive: false | ||
| 128 | IncludeIsMainRegex: '(Test)?$' | ||
| 129 | IncludeIsMainSourceRegex: '' | ||
| 130 | IndentAccessModifiers: false | ||
| 131 | IndentCaseBlocks: false | ||
| 132 | IndentCaseLabels: false | ||
| 133 | IndentExternBlock: AfterExternBlock | ||
| 134 | IndentGotoLabels: true | ||
| 135 | IndentPPDirectives: None | ||
| 136 | IndentRequiresClause: true | ||
| 137 | IndentWidth: 2 | ||
| 138 | IndentWrappedFunctionNames: false | ||
| 139 | InsertBraces: false | ||
| 140 | InsertNewlineAtEOF: false | ||
| 141 | InsertTrailingCommas: None | ||
| 142 | IntegerLiteralSeparator: | ||
| 143 | Binary: 0 | ||
| 144 | BinaryMinDigits: 0 | ||
| 145 | Decimal: 0 | ||
| 146 | DecimalMinDigits: 0 | ||
| 147 | Hex: 0 | ||
| 148 | HexMinDigits: 0 | ||
| 149 | JavaScriptQuotes: Leave | ||
| 150 | JavaScriptWrapImports: true | ||
| 151 | KeepEmptyLinesAtTheStartOfBlocks: true | ||
| 152 | KeepEmptyLinesAtEOF: false | ||
| 153 | LambdaBodyIndentation: Signature | ||
| 154 | LineEnding: DeriveLF | ||
| 155 | MacroBlockBegin: '' | ||
| 156 | MacroBlockEnd: '' | ||
| 157 | MaxEmptyLinesToKeep: 1 | ||
| 158 | NamespaceIndentation: None | ||
| 159 | ObjCBinPackProtocolList: Auto | ||
| 160 | ObjCBlockIndentWidth: 2 | ||
| 161 | ObjCBreakBeforeNestedBlockParam: true | ||
| 162 | ObjCSpaceAfterProperty: false | ||
| 163 | ObjCSpaceBeforeProtocolList: true | ||
| 164 | PackConstructorInitializers: BinPack | ||
| 165 | PenaltyBreakAssignment: 2 | ||
| 166 | PenaltyBreakBeforeFirstCallParameter: 19 | ||
| 167 | PenaltyBreakComment: 300 | ||
| 168 | PenaltyBreakFirstLessLess: 120 | ||
| 169 | PenaltyBreakOpenParenthesis: 0 | ||
| 170 | PenaltyBreakScopeResolution: 500 | ||
| 171 | PenaltyBreakString: 1000 | ||
| 172 | PenaltyBreakTemplateDeclaration: 10 | ||
| 173 | PenaltyExcessCharacter: 1000000 | ||
| 174 | PenaltyIndentedWhitespace: 0 | ||
| 175 | PenaltyReturnTypeOnItsOwnLine: 60 | ||
| 176 | PointerAlignment: Right | ||
| 177 | PPIndentWidth: -1 | ||
| 178 | QualifierAlignment: Leave | ||
| 179 | ReferenceAlignment: Pointer | ||
| 180 | ReflowComments: true | ||
| 181 | RemoveBracesLLVM: false | ||
| 182 | RemoveParentheses: Leave | ||
| 183 | RemoveSemicolon: false | ||
| 184 | RequiresClausePosition: OwnLine | ||
| 185 | RequiresExpressionIndentation: OuterScope | ||
| 186 | SeparateDefinitionBlocks: Leave | ||
| 187 | ShortNamespaceLines: 1 | ||
| 188 | SkipMacroDefinitionBody: false | ||
| 189 | SortIncludes: CaseSensitive | ||
| 190 | SortJavaStaticImport: Before | ||
| 191 | SortUsingDeclarations: LexicographicNumeric | ||
| 192 | SpaceAfterCStyleCast: false | ||
| 193 | SpaceAfterLogicalNot: false | ||
| 194 | SpaceAfterTemplateKeyword: true | ||
| 195 | SpaceAroundPointerQualifiers: Default | ||
| 196 | SpaceBeforeAssignmentOperators: true | ||
| 197 | SpaceBeforeCaseColon: false | ||
| 198 | SpaceBeforeCpp11BracedList: false | ||
| 199 | SpaceBeforeCtorInitializerColon: true | ||
| 200 | SpaceBeforeInheritanceColon: true | ||
| 201 | SpaceBeforeJsonColon: false | ||
| 202 | SpaceBeforeParens: ControlStatements | ||
| 203 | SpaceBeforeParensOptions: | ||
| 204 | AfterControlStatements: true | ||
| 205 | AfterForeachMacros: true | ||
| 206 | AfterFunctionDefinitionName: false | ||
| 207 | AfterFunctionDeclarationName: false | ||
| 208 | AfterIfMacros: true | ||
| 209 | AfterOverloadedOperator: false | ||
| 210 | AfterPlacementOperator: true | ||
| 211 | AfterRequiresInClause: false | ||
| 212 | AfterRequiresInExpression: false | ||
| 213 | BeforeNonEmptyParentheses: false | ||
| 214 | SpaceBeforeRangeBasedForLoopColon: true | ||
| 215 | SpaceBeforeSquareBrackets: false | ||
| 216 | SpaceInEmptyBlock: false | ||
| 217 | SpacesBeforeTrailingComments: 1 | ||
| 218 | SpacesInAngles: Never | ||
| 219 | SpacesInContainerLiterals: true | ||
| 220 | SpacesInLineCommentPrefix: | ||
| 221 | Minimum: 1 | ||
| 222 | Maximum: -1 | ||
| 223 | SpacesInParens: Never | ||
| 224 | SpacesInParensOptions: | ||
| 225 | InCStyleCasts: false | ||
| 226 | InConditionalStatements: false | ||
| 227 | InEmptyParentheses: false | ||
| 228 | Other: false | ||
| 229 | SpacesInSquareBrackets: false | ||
| 230 | Standard: Latest | ||
| 231 | StatementAttributeLikeMacros: | ||
| 232 | - Q_EMIT | ||
| 233 | StatementMacros: | ||
| 234 | - Q_UNUSED | ||
| 235 | - QT_REQUIRE_VERSION | ||
| 236 | TabWidth: 8 | ||
| 237 | UseTab: Never | ||
| 238 | VerilogBreakBetweenInstancePorts: true | ||
| 239 | WhitespaceSensitiveMacros: | ||
| 240 | - BOOST_PP_STRINGIZE | ||
| 241 | - CF_SWIFT_NAME | ||
| 242 | - NS_SWIFT_NAME | ||
| 243 | - PP_STRINGIZE | ||
| 244 | - STRINGIZE | ||
| 245 | ... | ||
| 246 | |||
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..874c63c
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+*.o
+
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,5 @@
 # $Id$
 
-CC?=gcc
-
 # Linux flavour
 # PREFIX?=/opt/diet
 # LIBOWFAT_HEADERS=$(PREFIX)/include
@@ -20,16 +18,24 @@ LIBOWFAT_LIBRARY=$(PREFIX)/libowfat
 BINDIR?=$(PREFIX)/bin
 STRIP?=strip
 
-#FEATURES+=-DWANT_V6
-
+#FEATURES+=-DWANT_V4_ONLY
 #FEATURES+=-DWANT_ACCESSLIST_BLACK
 #FEATURES+=-DWANT_ACCESSLIST_WHITE
 #FEATURES+=-DWANT_DYNAMIC_ACCESSLIST
 
 #FEATURES+=-DWANT_SYNC_LIVE
 #FEATURES+=-DWANT_IP_FROM_QUERY_STRING
-#FEATURES+=-DWANT_COMPRESSION_GZIP
-#FEATURES+=-DWANT_COMPRESSION_GZIP_ALWAYS
+
+# If you want gzip support to be compiled in, uncomment the next include.
+# You can further modify the behaviour by setting DWANT_COMPRESSION_GZIP_ALWAYS
+# in Makefile.gzip
+include Makefile.gzip
+
+# If you want zstd support to be compiled in, uncomment the next include.
+# You can further modify the behaviour by setting DWANT_COMPRESSION_ZSTD_ALWAYS
+# in Makefile.zstd
+#include Makefile.zstd
+
 #FEATURES+=-DWANT_LOG_NETWORKS
 #FEATURES+=-DWANT_RESTRICT_STATS
 #FEATURES+=-DWANT_IP_FROM_PROXY
@@ -41,18 +47,24 @@ STRIP?=strip
 #FEATURES+=-DWANT_DEV_RANDOM
 FEATURES+=-DWANT_FULLSCRAPE
 
+# You need libowfat version 0.34 to allow for automatic release of chunks during
+# full scrape transfer, if you rely on an older versions, enable this flag
+#FEATURES+=-DWANT_NO_AUTO_FREE
+
 # Is enabled on BSD systems by default in trackerlogic.h
-# on Linux systems you will need -lbds
-#FEATURES+=-DWANT_ARC4RANDOM
+# on Linux systems the include Makefile adds -lbsd
+#include Makefile.arc4random
 
 #FEATURES+=-D_DEBUG_HTTPERROR
+#FEATURES+=-D_DEBUG_RANDOMTORRENTS
+
+GIT_VERSION=$(shell sh -c 'command -v git >/dev/null && test -d .git && git rev-parse HEAD || echo _git_or_commit_not_found_')
 
 OPTS_debug=-D_DEBUG -g -ggdb # -pg -fprofile-arcs -ftest-coverage
 OPTS_production=-O3
 
-CFLAGS+=-I$(LIBOWFAT_HEADERS) -Wall -pipe -Wextra #-ansi -pedantic
-LDFLAGS+=-L$(LIBOWFAT_LIBRARY) -lowfat -pthread -lpthread -lz
-#LDFLAGS+=-lbsd
+CFLAGS+=-I$(LIBOWFAT_HEADERS) -DGIT_VERSION=$(GIT_VERSION) -Wall -pipe -pthread -Wextra #-ansi -pedantic
+LDFLAGS+=-L$(LIBOWFAT_LIBRARY) -lowfat -pthread
 
 BINARY =opentracker
 HEADERS=trackerlogic.h scan_urlencoded_query.h ot_mutex.h ot_stats.h ot_vector.h ot_clean.h ot_udp.h ot_iovec.h ot_fullscrape.h ot_accesslist.h ot_http.h ot_livesync.h ot_rijndael.h
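Note on the new GIT_VERSION variable: it is handed to the compiler unquoted as -DGIT_VERSION=$(GIT_VERSION). A minimal, self-contained sketch of how such a bare macro is commonly turned into a printable string follows; the STRINGIFY/TOSTRING helpers and the main() driver are illustrative assumptions, not code from this commit.

    #include <stdio.h>

    /* Two-step expansion so the *value* of GIT_VERSION is stringified,
       not the macro name itself. */
    #define STRINGIFY(x) #x
    #define TOSTRING(x)  STRINGIFY(x)

    #ifndef GIT_VERSION
    #define GIT_VERSION  _git_or_commit_not_found_
    #endif

    int main(void) {
      /* e.g. compiled with: cc -DGIT_VERSION=14e9989 version.c */
      printf("opentracker build: %s\n", TOSTRING(GIT_VERSION));
      return 0;
    }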
diff --git a/Makefile.arc4random b/Makefile.arc4random
new file mode 100644
index 0000000..1488408
--- /dev/null
+++ b/Makefile.arc4random
@@ -0,0 +1,3 @@
+FEATURES+=-DWANT_ARC4RANDOM
+LDFLAGS+=-lbsd
+
diff --git a/Makefile.gzip b/Makefile.gzip
new file mode 100644
index 0000000..70d6d62
--- /dev/null
+++ b/Makefile.gzip
@@ -0,0 +1,4 @@
+FEATURES+=-DWANT_COMPRESSION_GZIP
+#FEATURES+=-DWANT_COMPRESSION_GZIP_ALWAYS
+
+LDFLAGS+=-lz
diff --git a/Makefile.zstd b/Makefile.zstd
new file mode 100644
index 0000000..2bb56be
--- /dev/null
+++ b/Makefile.zstd
@@ -0,0 +1,3 @@
+FEATURES+=-DWANT_COMPRESSION_ZSTD
+#FEATURES+=-DWANT_COMPRESSION_ZSTD_ALWAYS
+LDFLAGS+=-lzstd
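Makefile.gzip and Makefile.zstd only add the feature define and the matching linker flag; the commented-out *_ALWAYS define presumably switches compression from per-request to unconditional. A rough, self-contained sketch of how such a macro pair is commonly wired up at the C level; client_accepts_gzip() and the header string are made-up placeholders, not opentracker's actual negotiation code.

    #include <stdio.h>
    #include <string.h>

    /* Placeholder for real Accept-Encoding parsing. */
    static int client_accepts_gzip(const char *headers) {
      return headers && strstr(headers, "gzip") != NULL;
    }

    int main(void) {
      const char *headers  = "Accept-Encoding: gzip";
      int         use_gzip = 0;

    #ifdef WANT_COMPRESSION_GZIP
    #ifdef WANT_COMPRESSION_GZIP_ALWAYS
      use_gzip = 1;                            /* always compress */
    #else
      use_gzip = client_accepts_gzip(headers); /* only when asked for */
    #endif
    #endif

      printf("gzip output: %s\n", use_gzip ? "enabled" : "disabled");
      return 0;
    }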
diff --git a/man1/opentracker.1 b/man1/opentracker.1
new file mode 100644
index 0000000..0103ebe
--- /dev/null
+++ b/man1/opentracker.1
@@ -0,0 +1,130 @@
+.Dd 15/4/2024
+.Dt opentracker 1
+.Os Unix
+.Sh opentracker
+.Nm opentracker
+.Nd a free and open bittorrent tracker
+.Sh SYNOPSIS
+.Nm
+.Op Fl f Ar config
+.Op Fl i Ar ip-select
+.Op Fl p Ar port-bind-tcp
+.Op Fl P Ar port-bind-udp
+.Op Fl A Ar blessed-ip
+.Op Fl r Ar redirect-url
+.Op Fl d Ar chdir
+.Op Fl u Ar user
+.Op Fl w| Fl b accesslist
+.Sh DESCRIPTION
+.Nm
+is a bittorrent tracker that implements announce and scrape actions over the
+UDP and the plain http protocol, aiming for minimal resource usage.
+.Pp
+When invoked with parameters, it binds to TCP and UDP port 6969 on all
+interfaces. The recommended way to configure opentracker is by providing a
+config file using the
+.Op Fl f Ar config
+option. See
+.Xr opentracker.conf 4
+for details.
+.Pp
+.Sh OPTIONS
+The following options are available:
+.Bl -tag -width -indent=8
+.It Fl f Ar config
+Parse a config file with a list of options. Consecutive command options
+will override options from the config file. See
+.Xr opentracker.conf 4
+for details.
+.It Fl i Ar ip-select
+Select an ip address that will be used with the next
+.Op Fl p
+or
+.Op Fl P
+command to actually bind to this address. Setting this option without any bind
+options in the config file or
+.Op Fl p
+or
+.Op Fl P
+commands will limit opentracker to only bind to this address.
+.It Fl p Ar port-bind-tcp
+Bind to the TCP port on the last preceding ip address set with the
+.Op Fl i ip-select
+option or to all available addresses if none has been set. Can be given multiple
+times.
+.It Fl P Ar port-bind-udp
+Bind to the UDP port on the last preceding ip address set with the
+.Op Fl i ip-select
+option or to all available addresses if none has been set. Can be given multiple
+times.
+.It Fl A Ar blessed-ip
+Set an ip address in IPv4 or IPv6 or a net in CIDR notation to bless the network
+for access to restricted resources.
+.It Fl r Ar redirect-url
+Set the URL that
+.Nm
+will redirect users to when the / address is requested via HTTP.
+.It Fl d Ar chdir
+Sets the directory
+.Nm
+will
+.Xr chroot 2
+to if ran as root or
+.Xr chdir 2
+to if ran as unprivileged user. Note that any accesslist files need to be
+relative to and within that directory.
+.It Fl u Ar user
+User to run
+.Nm
+under after all operations that need privileges have finished.
+.It Fl w Ar accesslist | Fl b Ar accesslist
+If
+.Nm
+has been compiled with the
+.B WANT_ACCESSLIST_BLACK
+or
+.Br WANT_ACCESSLIST_WHITE
+options, this option sets the location of the accesslist.
+.El
+.Sh EXAMPLES
+Start
+.Nm
+bound on UDP and TCP ports 6969 on IPv6 localhost.
+.Dl # ./opentracker -i ::1 -p 6969 -P 6969
+.Pp
+Start
+.Nm
+bound on UDP port 6868 and TCP port 6868 on IPv4 localhost and allow
+privileged access from the network 192.168/16 while redirecting
+HTTP clients accessing the root directory, which is not covered by the
+bittorrent tracker protocol, to https://my-trackersite.com/.
+.Dl # ./opentracker -i 192.168.0.4 -p 6868 -P 6969 -A 192.168/16 -r https://my-trackersite.com/
+The announce URLs are http://192.168.0.4:6868/announce and
+udp://192.168.0.4:6868/announce respectively.
+.Sh FILES
+.Bl -tag -width indent
+.It Pa opentracker.conf
+The
+.Nm
+config file.
+.El
+.Sh SEE ALSO
+.Xr opentracker.conf 4
+.Pp
+opentracker documentation
+.Lk https://erdgeist.org/arts/software/opentracker
+.Pp
+Bittorrent tracker protocol
+.Lk http://www.bittorrent.org/beps/bep_0015.html
+.Sh AUTHOR
+.An Dirk Engling
+.Aq Mt erdgeist@erdgeist.org .
+.Sh LICENSE
+This software is released under the Beerware License:
+.Pp
+Permission is hereby granted, free of charge, to any person obtaining a copy of this software
+and associated documentation files (the "Software"), to deal in the Software with the following
+terms and conditions:
+.Pp
+If you meet the author(s) someday, and you think this software is worth it, you can buy them
+a beer in return.
diff --git a/man4/opentracker.conf.4 b/man4/opentracker.conf.4
new file mode 100644
index 0000000..2bc1389
--- /dev/null
+++ b/man4/opentracker.conf.4
@@ -0,0 +1,86 @@
+.Dd 2024-04-18
+.Dt opentracker.conf 5
+.Os Unix
+.Sh NAME
+.Nm opentracker.conf
+.Nd configuration file for opentracker
+.Sh SYNOPSIS
+.Nm
+.Sh DESCRIPTION
+The
+.Nm
+configuration file specifies various options for configuring the behavior of the opentracker program.
+.Pp
+Lines starting with '#' are comments and are ignored. Options are specified as 'keyword value' pairs.
+.Pp
+The following options are available:
+.Pp
+.Bl -tag -width ".It access.proxy" -compact
+.It listen.tcp_udp Ar address
+Specifies an address opentracker will listen on for both TCP and UDP connections. If none are specified, opentracker listens on 0.0.0.0:6969 by default. Can be added more than once.
+.Pp
+.It listen.tcp Ar address
+Specifies the address opentracker will listen on for TCP connections. Can be added more than once.
+.Pp
+.It listen.udp Ar address
+Specifies the address opentracker will listen on for UDP connections. Can be added more than once.
+.Pp
+.It listen.udp.workers Ar threads
+Specifies how many threads will be spawned to handle UDP connections. Defaults to 4.
+.Pp
+.It access.whitelist Ar path/to/whitelist
+Specifies the path to the whitelist file containing all torrent hashes that opentracker will serve. Use this option if opentracker runs in a non-open mode.
+.Pp
+.It access.blacklist Ar path/to/blacklist
+Specifies the path to the blacklist file containing all torrent hashes that opentracker will not serve. Use this option if opentracker was compiled to allow blacklisting.
+.Pp
+.It access.fifo_add Ar path/to/adder.fifo
+Specifies the path to the FIFO (named pipe) used for dynamic changesets to accesslists. Info hashes written to this FIFO will be added to the main accesslist file.
+.Pp
+.It access.fifo_delete Ar path/to/deleter.fifo
+Specifies the path to the FIFO (named pipe) used for dynamic changesets to accesslists. Info hashes written to this FIFO will be removed from the main accesslist file.
+.Pp
+.It access.stats Ar ip_address_or_network
+Specifies the IP address or network in CIDR notation allowed to fetch stats from opentracker.
+.Pp
+.It access.stats_path Ar path
+Specifies the path to the stats location. You can configure opentracker to appear anywhere on your tracker. Defaults to /stats.
+.Pp
+.It access.proxy Ar ip_address_or_network
+Specifies the IP address or network of the reverse proxies. Opentracker will take the X-Forwarded-For address instead of the source IP address. Can be added more than once.
+.Pp
+.It livesync.cluster.listen Ar ip_address:port
+Specifies the IP address and port opentracker will listen on for incoming live sync packets to keep a cluster of opentrackers synchronized.
+.Pp
+.It livesync.cluster.node_ip Ar ip_address
+Specifies one trusted IP address for sync between trackers running in a cluster. Can be added more than once.
+.Pp
+.It batchsync.cluster.admin_ip Ar ip_address
+Specifies the admin IP address for old-style (HTTP-based) asynchronous tracker syncing.
+.Pp
+.It tracker.rootdir Ar path
+Specifies the directory opentracker will chroot/chdir to. All black/white list files must be located in this directory.
+.Pp
+.It tracker.user Ar username
+Specifies the user opentracker will setuid to after binding to potentially privileged ports.
+.Pp
+.It tracker.redirect_url Ar URL
+Specifies the URL opentracker will redirect to in response to a "GET / HTTP" request.
+.El
+.Sh EXAMPLES
+To specify the address opentracker will listen on for both TCP and UDP connections:
+.Dl listen.tcp_udp 0.0.0.0:6969
+.Pp
+To specify the address opentracker will listen on for TCP connections:
+.Dl listen.tcp 0.0.0.0
+.Pp
+To specify the address opentracker will listen on for UDP connections:
+.Dl listen.udp 0.0.0.0:6969
+.Pp
+.Sh SEE ALSO
+.Xr opentracker 1
+.Pp
+.Sh AUTHOR
+.An Dirk Engling
+.Aq Mt erdgeist@erdgeist.org
+.Pp
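Taken together, the keywords documented above compose into a single flat config file. A hedged sketch of what a small opentracker.conf built from them could look like; all addresses, paths and the user name are placeholders, not values taken from this commit.

    # bind one address for both protocols and add a UDP-only port
    listen.tcp_udp 192.0.2.10:6969
    listen.udp     192.0.2.10:6970
    listen.udp.workers 4

    # restrict stats and trust a reverse proxy (placeholder networks)
    access.stats      192.0.2.0/24
    access.stats_path stats
    access.proxy      198.51.100.1

    # drop privileges and jail the process
    tracker.rootdir      /var/opentracker
    tracker.user         nobody
    tracker.redirect_url https://example.net/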
diff --git a/opentracker.c b/opentracker.c
index ff2409c..14e9989 100644
--- a/opentracker.c
+++ b/opentracker.c
| @@ -5,59 +5,59 @@ | |||
| 5 | $Id$ */ | 5 | $Id$ */ |
| 6 | 6 | ||
| 7 | /* System */ | 7 | /* System */ |
| 8 | #include <stdlib.h> | ||
| 9 | #include <string.h> | ||
| 10 | #include <arpa/inet.h> | 8 | #include <arpa/inet.h> |
| 11 | #include <sys/socket.h> | 9 | #include <ctype.h> |
| 12 | #include <unistd.h> | ||
| 13 | #include <errno.h> | 10 | #include <errno.h> |
| 11 | #include <pthread.h> | ||
| 12 | #include <pwd.h> | ||
| 14 | #include <signal.h> | 13 | #include <signal.h> |
| 15 | #include <stdio.h> | 14 | #include <stdio.h> |
| 16 | #include <pwd.h> | 15 | #include <stdlib.h> |
| 17 | #include <ctype.h> | 16 | #include <string.h> |
| 18 | #include <pthread.h> | 17 | #include <sys/socket.h> |
| 18 | #include <unistd.h> | ||
| 19 | #ifdef WANT_SYSLOGS | 19 | #ifdef WANT_SYSLOGS |
| 20 | #include <syslog.h> | 20 | #include <syslog.h> |
| 21 | #endif | 21 | #endif |
| 22 | 22 | ||
| 23 | /* Libowfat */ | 23 | /* Libowfat */ |
| 24 | #include "socket.h" | 24 | #include "byte.h" |
| 25 | #include "io.h" | 25 | #include "io.h" |
| 26 | #include "iob.h" | 26 | #include "iob.h" |
| 27 | #include "byte.h" | ||
| 28 | #include "scan.h" | ||
| 29 | #include "ip6.h" | 27 | #include "ip6.h" |
| 28 | #include "scan.h" | ||
| 29 | #include "socket.h" | ||
| 30 | 30 | ||
| 31 | /* Opentracker */ | 31 | /* Opentracker */ |
| 32 | #include "trackerlogic.h" | ||
| 33 | #include "ot_mutex.h" | ||
| 34 | #include "ot_http.h" | ||
| 35 | #include "ot_udp.h" | ||
| 36 | #include "ot_accesslist.h" | 32 | #include "ot_accesslist.h" |
| 37 | #include "ot_stats.h" | 33 | #include "ot_http.h" |
| 38 | #include "ot_livesync.h" | 34 | #include "ot_livesync.h" |
| 35 | #include "ot_mutex.h" | ||
| 36 | #include "ot_stats.h" | ||
| 37 | #include "ot_udp.h" | ||
| 38 | #include "trackerlogic.h" | ||
| 39 | 39 | ||
| 40 | /* Globals */ | 40 | /* Globals */ |
| 41 | time_t g_now_seconds; | 41 | time_t g_now_seconds; |
| 42 | char * g_redirecturl; | 42 | char *g_redirecturl; |
| 43 | uint32_t g_tracker_id; | 43 | uint32_t g_tracker_id; |
| 44 | volatile int g_opentracker_running = 1; | 44 | volatile int g_opentracker_running = 1; |
| 45 | int g_self_pipe[2]; | 45 | int g_self_pipe[2]; |
| 46 | 46 | ||
| 47 | static char * g_serverdir; | 47 | static char *g_serverdir; |
| 48 | static char * g_serveruser; | 48 | static char *g_serveruser; |
| 49 | static unsigned int g_udp_workers; | 49 | static unsigned int g_udp_workers; |
| 50 | 50 | ||
| 51 | static void panic( const char *routing ) __attribute__ ((noreturn)); | 51 | static void panic(const char *routine) __attribute__((noreturn)); |
| 52 | static void panic( const char *routine ) { | 52 | static void panic(const char *routine) { |
| 53 | fprintf( stderr, "%s: %s\n", routine, strerror(errno) ); | 53 | fprintf(stderr, "%s: %s\n", routine, strerror(errno)); |
| 54 | exit( 111 ); | 54 | exit(111); |
| 55 | } | 55 | } |
| 56 | 56 | ||
| 57 | static void signal_handler( int s ) { | 57 | static void signal_handler(int s) { |
| 58 | if( s == SIGINT ) { | 58 | if (s == SIGINT) { |
| 59 | /* Any new interrupt signal quits the application */ | 59 | /* Any new interrupt signal quits the application */ |
| 60 | signal( SIGINT, SIG_DFL); | 60 | signal(SIGINT, SIG_DFL); |
| 61 | 61 | ||
| 62 | /* Tell all other threads to not acquire any new lock on a bucket | 62 | /* Tell all other threads to not acquire any new lock on a bucket |
| 63 | but cancel their operations and return */ | 63 | but cancel their operations and return */ |
| @@ -69,219 +69,231 @@ static void signal_handler( int s ) { | |||
| 69 | closelog(); | 69 | closelog(); |
| 70 | #endif | 70 | #endif |
| 71 | 71 | ||
| 72 | exit( 0 ); | 72 | exit(0); |
| 73 | } else if( s == SIGALRM ) { | ||
| 74 | /* Maintain our copy of the clock. time() on BSDs is very expensive. */ | ||
| 75 | g_now_seconds = time(NULL); | ||
| 76 | alarm(5); | ||
| 77 | } | 73 | } |
| 78 | } | 74 | } |
| 79 | 75 | ||
| 80 | static void defaul_signal_handlers( void ) { | 76 | static void defaul_signal_handlers(void) { |
| 81 | sigset_t signal_mask; | 77 | sigset_t signal_mask; |
| 82 | sigemptyset(&signal_mask); | 78 | sigemptyset(&signal_mask); |
| 83 | sigaddset (&signal_mask, SIGPIPE); | 79 | sigaddset(&signal_mask, SIGPIPE); |
| 84 | sigaddset (&signal_mask, SIGHUP); | 80 | sigaddset(&signal_mask, SIGHUP); |
| 85 | sigaddset (&signal_mask, SIGINT); | 81 | sigaddset(&signal_mask, SIGINT); |
| 86 | sigaddset (&signal_mask, SIGALRM); | 82 | sigaddset(&signal_mask, SIGALRM); |
| 87 | pthread_sigmask (SIG_BLOCK, &signal_mask, NULL); | 83 | pthread_sigmask(SIG_BLOCK, &signal_mask, NULL); |
| 88 | } | 84 | } |
| 89 | 85 | ||
| 90 | static void install_signal_handlers( void ) { | 86 | static void install_signal_handlers(void) { |
| 91 | struct sigaction sa; | 87 | struct sigaction sa; |
| 92 | sigset_t signal_mask; | 88 | sigset_t signal_mask; |
| 93 | sigemptyset(&signal_mask); | 89 | sigemptyset(&signal_mask); |
| 94 | 90 | ||
| 95 | sa.sa_handler = signal_handler; | 91 | sa.sa_handler = signal_handler; |
| 96 | sigemptyset(&sa.sa_mask); | 92 | sigemptyset(&sa.sa_mask); |
| 97 | sa.sa_flags = SA_RESTART; | 93 | sa.sa_flags = SA_RESTART; |
| 98 | if ((sigaction(SIGINT, &sa, NULL) == -1) || (sigaction(SIGALRM, &sa, NULL) == -1) ) | 94 | if ((sigaction(SIGINT, &sa, NULL) == -1) || (sigaction(SIGALRM, &sa, NULL) == -1)) |
| 99 | panic( "install_signal_handlers" ); | 95 | panic("install_signal_handlers"); |
| 100 | 96 | ||
| 101 | sigaddset (&signal_mask, SIGINT); | 97 | sigaddset(&signal_mask, SIGINT); |
| 102 | sigaddset (&signal_mask, SIGALRM); | 98 | pthread_sigmask(SIG_UNBLOCK, &signal_mask, NULL); |
| 103 | pthread_sigmask (SIG_UNBLOCK, &signal_mask, NULL); | ||
| 104 | } | 99 | } |
| 105 | 100 | ||
| 106 | static void usage( char *name ) { | 101 | static void usage(char *name) { |
| 107 | fprintf( stderr, "Usage: %s [-i ip] [-p port] [-P port] [-r redirect] [-d dir] [-u user] [-A ip[/bits]] [-f config] [-s livesyncport]" | 102 | fprintf(stderr, |
| 103 | "Usage: %s [-i ip] [-p port] [-P port] [-r redirect] [-d dir] [-u user] [-A ip[/bits]] [-f config] [-s livesyncport]" | ||
| 108 | #ifdef WANT_ACCESSLIST_BLACK | 104 | #ifdef WANT_ACCESSLIST_BLACK |
| 109 | " [-b blacklistfile]" | 105 | " [-b blacklistfile]" |
| 110 | #elif defined ( WANT_ACCESSLIST_WHITE ) | 106 | #elif defined(WANT_ACCESSLIST_WHITE) |
| 111 | " [-w whitelistfile]" | 107 | " [-w whitelistfile]" |
| 112 | #endif | 108 | #endif |
| 113 | "\n", name ); | 109 | "\n", |
| 110 | name); | ||
| 114 | } | 111 | } |
| 115 | 112 | ||
| 116 | #define HELPLINE(opt,desc) fprintf(stderr, "\t%-10s%s\n",opt,desc) | 113 | #define HELPLINE(opt, desc) fprintf(stderr, "\t%-10s%s\n", opt, desc) |
| 117 | static void help( char *name ) { | 114 | static void help(char *name) { |
| 118 | usage( name ); | 115 | usage(name); |
| 119 | 116 | ||
| 120 | HELPLINE("-f config","include and execute the config file"); | 117 | HELPLINE("-f config", "include and execute the config file"); |
| 121 | HELPLINE("-i ip","specify ip to bind to (default: *, you may specify more than one)"); | 118 | HELPLINE("-i ip", "specify ip to bind to with next -[pP] (default: any, overrides preceeding ones)"); |
| 122 | HELPLINE("-p port","specify tcp port to bind to (default: 6969, you may specify more than one)"); | 119 | HELPLINE("-p port", "do bind to tcp port (default: 6969, you may specify more than one)"); |
| 123 | HELPLINE("-P port","specify udp port to bind to (default: 6969, you may specify more than one)"); | 120 | HELPLINE("-P port", "do bind to udp port (default: 6969, you may specify more than one)"); |
| 124 | HELPLINE("-r redirecturl","specify url where / should be redirected to (default none)"); | 121 | HELPLINE("-r redirecturl", "specify url where / should be redirected to (default none)"); |
| 125 | HELPLINE("-d dir","specify directory to try to chroot to (default: \".\")"); | 122 | HELPLINE("-d dir", "specify directory to try to chroot to (default: \".\")"); |
| 126 | HELPLINE("-u user","specify user under whose privileges opentracker should run (default: \"nobody\")"); | 123 | HELPLINE("-u user", "specify user under whose privileges opentracker should run (default: \"nobody\")"); |
| 127 | HELPLINE("-A ip[/bits]","bless an ip address or net as admin address (e.g. to allow syncs from this address)"); | 124 | HELPLINE("-A ip[/bits]", "bless an ip address or net as admin address (e.g. to allow syncs from this address)"); |
| 128 | #ifdef WANT_ACCESSLIST_BLACK | 125 | #ifdef WANT_ACCESSLIST_BLACK |
| 129 | HELPLINE("-b file","specify blacklist file."); | 126 | HELPLINE("-b file", "specify blacklist file."); |
| 130 | #elif defined( WANT_ACCESSLIST_WHITE ) | 127 | #elif defined(WANT_ACCESSLIST_WHITE) |
| 131 | HELPLINE("-w file","specify whitelist file."); | 128 | HELPLINE("-w file", "specify whitelist file."); |
| 132 | #endif | 129 | #endif |
| 133 | 130 | ||
| 134 | fprintf( stderr, "\nExample: ./opentracker -i 127.0.0.1 -p 6969 -P 6969 -f ./opentracker.conf -i 10.1.1.23 -p 2710 -p 80\n" ); | 131 | fprintf(stderr, "\nExample: ./opentracker -i 127.0.0.1 -p 6969 -P 6969 -f ./opentracker.conf -i 10.1.1.23 -p 2710 -p 80\n"); |
| 132 | fprintf(stderr, " Here -i 127.0.0.1 selects the ip address for the next -p 6969 and -P 6969.\n"); | ||
| 133 | fprintf(stderr, " If no port is bound from config file or command line, the last address given\n"); | ||
| 134 | fprintf(stderr, " (or ::1 if none is set) will be used on port 6969.\n"); | ||
| 135 | } | 135 | } |
| 136 | #undef HELPLINE | 136 | #undef HELPLINE |
| 137 | 137 | ||
| 138 | static ssize_t header_complete( char * request, ssize_t byte_count ) { | 138 | static ssize_t header_complete(char *request, ssize_t byte_count) { |
| 139 | ssize_t i = 0, state = 0; | 139 | ssize_t i = 0, state = 0; |
| 140 | 140 | ||
| 141 | for( i=1; i < byte_count; i+=2 ) | 141 | for (i = 1; i < byte_count; i += 2) |
| 142 | if( request[i] <= 13 ) { | 142 | if (request[i] <= 13) { |
| 143 | i--; | 143 | i--; |
| 144 | for( state = 0 ; i < byte_count; ++i ) { | 144 | for (state = 0; i < byte_count; ++i) { |
| 145 | char c = request[i]; | 145 | char c = request[i]; |
| 146 | if( c == '\r' || c == '\n' ) | 146 | if (c == '\r' || c == '\n') |
| 147 | state = ( state >> 2 ) | ( ( c << 6 ) & 0xc0 ); | 147 | state = (state >> 2) | ((c << 6) & 0xc0); |
| 148 | else | 148 | else |
| 149 | break; | 149 | break; |
| 150 | if( state >= 0xa0 || state == 0x99 ) return i + 1; | 150 | if (state >= 0xa0 || state == 0x99) |
| 151 | return i + 1; | ||
| 151 | } | 152 | } |
| 152 | } | 153 | } |
| 153 | return 0; | 154 | return 0; |
| 154 | } | 155 | } |
| 155 | 156 | ||
| 156 | static void handle_dead( const int64 sock ) { | 157 | static void handle_dead(const int64 sock) { |
| 157 | struct http_data* cookie=io_getcookie( sock ); | 158 | struct http_data *cookie = io_getcookie(sock); |
| 158 | if( cookie ) { | 159 | if (cookie) { |
| 159 | size_t i; | 160 | size_t i; |
| 160 | for ( i = 0; i < cookie->batches; ++i) | 161 | for (i = 0; i < cookie->batches; ++i) |
| 161 | iob_reset( cookie->batch + i ); | 162 | iob_reset(cookie->batch + i); |
| 162 | free( cookie->batch ); | 163 | free(cookie->batch); |
| 163 | array_reset( &cookie->request ); | 164 | array_reset(&cookie->request); |
| 164 | if( cookie->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK ) | 165 | if (cookie->flag & (STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER)) |
| 165 | mutex_workqueue_canceltask( sock ); | 166 | mutex_workqueue_canceltask(sock); |
| 166 | free( cookie ); | 167 | free(cookie); |
| 167 | } | 168 | } |
| 168 | io_close( sock ); | 169 | io_close(sock); |
| 169 | } | 170 | } |
| 170 | 171 | ||
| 171 | static void handle_read( const int64 sock, struct ot_workstruct *ws ) { | 172 | static void handle_read(const int64 sock, struct ot_workstruct *ws) { |
| 172 | struct http_data* cookie = io_getcookie( sock ); | 173 | struct http_data *cookie = io_getcookie(sock); |
| 173 | ssize_t byte_count = io_tryread( sock, ws->inbuf, G_INBUF_SIZE ); | 174 | ssize_t byte_count = io_tryread(sock, ws->inbuf, G_INBUF_SIZE); |
| 174 | 175 | ||
| 175 | if( byte_count == 0 || byte_count == -3 ) { | 176 | if (byte_count == 0 || byte_count == -3) { |
| 176 | handle_dead( sock ); | 177 | handle_dead(sock); |
| 177 | return; | 178 | return; |
| 178 | } | 179 | } |
| 179 | 180 | ||
| 180 | if( byte_count == -1) | 181 | if (byte_count == -1) |
| 181 | return; | 182 | return; |
| 182 | 183 | ||
| 183 | /* If we get the whole request in one packet, handle it without copying */ | 184 | /* If we get the whole request in one packet, handle it without copying */ |
| 184 | if( !array_start( &cookie->request ) ) { | 185 | if (!array_start(&cookie->request)) { |
| 185 | if( ( ws->header_size = header_complete( ws->inbuf, byte_count ) ) ) { | 186 | if ((ws->header_size = header_complete(ws->inbuf, byte_count))) { |
| 186 | ws->request = ws->inbuf; | 187 | ws->request = ws->inbuf; |
| 187 | ws->request_size = byte_count; | 188 | ws->request_size = byte_count; |
| 188 | http_handle_request( sock, ws ); | 189 | http_handle_request(sock, ws); |
| 189 | } else | 190 | } else |
| 190 | array_catb( &cookie->request, ws->inbuf, (size_t)byte_count ); | 191 | array_catb(&cookie->request, ws->inbuf, (size_t)byte_count); |
| 191 | return; | 192 | return; |
| 192 | } | 193 | } |
| 193 | 194 | ||
| 194 | array_catb( &cookie->request, ws->inbuf, byte_count ); | 195 | array_catb(&cookie->request, ws->inbuf, byte_count); |
| 195 | if( array_failed( &cookie->request ) || array_bytes( &cookie->request ) > 8192 ) { | 196 | if (array_failed(&cookie->request) || array_bytes(&cookie->request) > 8192) { |
| 196 | http_issue_error( sock, ws, CODE_HTTPERROR_500 ); | 197 | http_issue_error(sock, ws, CODE_HTTPERROR_500); |
| 197 | return; | 198 | return; |
| 198 | } | 199 | } |
| 199 | 200 | ||
| 200 | while( ( ws->header_size = header_complete( array_start( &cookie->request ), array_bytes( &cookie->request ) ) ) ) { | 201 | while ((ws->header_size = header_complete(array_start(&cookie->request), array_bytes(&cookie->request)))) { |
| 201 | ws->request = array_start( &cookie->request ); | 202 | ws->request = array_start(&cookie->request); |
| 202 | ws->request_size = array_bytes( &cookie->request ); | 203 | ws->request_size = array_bytes(&cookie->request); |
| 203 | http_handle_request( sock, ws ); | 204 | http_handle_request(sock, ws); |
| 204 | #ifdef WANT_KEEPALIVE | 205 | #ifdef WANT_KEEPALIVE |
| 205 | if( !ws->keep_alive ) | 206 | if (!ws->keep_alive) |
| 206 | #endif | 207 | #endif |
| 207 | return; | 208 | return; |
| 208 | } | 209 | } |
| 209 | } | 210 | } |
| 210 | 211 | ||
| 211 | static void handle_write( const int64 sock ) { | 212 | static void handle_write(const int64 sock) { |
| 212 | struct http_data* cookie=io_getcookie( sock ); | 213 | struct http_data *cookie = io_getcookie(sock); |
| 213 | size_t i; | 214 | size_t i; |
| 215 | int chunked = 0; | ||
| 214 | 216 | ||
| 215 | /* Look for the first io_batch still containing bytes to write */ | 217 | /* Look for the first io_batch still containing bytes to write */ |
| 216 | if( cookie ) | 218 | if (cookie) { |
| 217 | for( i = 0; i < cookie->batches; ++i ) | 219 | if (cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER) |
| 218 | if( cookie->batch[i].bytesleft ) { | 220 | chunked = 1; |
| 219 | int64 res = iob_send( sock, cookie->batch + i ); | ||
| 220 | 221 | ||
| 221 | if( res == -3 ) | 222 | for (i = 0; i < cookie->batches; ++i) { |
| 222 | break; | 223 | if (cookie->batch[i].bytesleft) { |
| 224 | int64 res = iob_send(sock, cookie->batch + i); | ||
| 225 | |||
| 226 | if (res == -3) { | ||
| 227 | handle_dead(sock); | ||
| 228 | return; | ||
| 229 | } | ||
| 223 | 230 | ||
| 224 | if( !cookie->batch[i].bytesleft ) | 231 | if (!cookie->batch[i].bytesleft) |
| 225 | continue; | 232 | continue; |
| 226 | 233 | ||
| 227 | if( res == -1 || res > 0 || i < cookie->batches - 1 ) | 234 | if (res == -1 || res > 0 || i < cookie->batches - 1) |
| 228 | return; | 235 | return; |
| 229 | } | 236 | } |
| 237 | } | ||
| 238 | } | ||
| 230 | 239 | ||
| 231 | handle_dead( sock ); | 240 | /* In a chunked transfer after all batches accumulated have been sent, wait for the next one */ |
| 241 | if (chunked) | ||
| 242 | io_dontwantwrite(sock); | ||
| 243 | else | ||
| 244 | handle_dead(sock); | ||
| 232 | } | 245 | } |
| 233 | 246 | ||
| 234 | static void handle_accept( const int64 serversocket ) { | 247 | static void handle_accept(const int64 serversocket) { |
| 235 | struct http_data *cookie; | 248 | struct http_data *cookie; |
| 236 | int64 sock; | 249 | int64 sock; |
| 237 | ot_ip6 ip; | 250 | ot_ip6 ip; |
| 238 | uint16 port; | 251 | uint16 port; |
| 239 | tai6464 t; | 252 | tai6464 t; |
| 240 | 253 | ||
| 241 | while( ( sock = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) { | 254 | while ((sock = socket_accept6(serversocket, ip, &port, NULL)) != -1) { |
| 242 | 255 | ||
| 243 | /* Put fd into a non-blocking mode */ | 256 | /* Put fd into a non-blocking mode */ |
| 244 | io_nonblock( sock ); | 257 | io_nonblock(sock); |
| 245 | 258 | ||
| 246 | if( !io_fd( sock ) || | 259 | if (!io_fd(sock) || !(cookie = (struct http_data *)malloc(sizeof(struct http_data)))) { |
| 247 | !( cookie = (struct http_data*)malloc( sizeof(struct http_data) ) ) ) { | 260 | io_close(sock); |
| 248 | io_close( sock ); | ||
| 249 | continue; | 261 | continue; |
| 250 | } | 262 | } |
| 251 | memset(cookie, 0, sizeof( struct http_data ) ); | 263 | memset(cookie, 0, sizeof(struct http_data)); |
| 252 | memcpy(cookie->ip,ip,sizeof(ot_ip6)); | 264 | memcpy(cookie->ip, ip, sizeof(ot_ip6)); |
| 253 | 265 | ||
| 254 | io_setcookie( sock, cookie ); | 266 | io_setcookie(sock, cookie); |
| 255 | io_wantread( sock ); | 267 | io_wantread(sock); |
| 256 | 268 | ||
| 257 | stats_issue_event( EVENT_ACCEPT, FLAG_TCP, (uintptr_t)ip); | 269 | stats_issue_event(EVENT_ACCEPT, FLAG_TCP, (uintptr_t)ip); |
| 258 | 270 | ||
| 259 | /* That breaks taia encapsulation. But there is no way to take system | 271 | /* That breaks taia encapsulation. But there is no way to take system |
| 260 | time this often in FreeBSD and libowfat does not allow to set unix time */ | 272 | time this often in FreeBSD and libowfat does not allow to set unix time */ |
| 261 | taia_uint( &t, 0 ); /* Clear t */ | 273 | taia_uint(&t, 0); /* Clear t */ |
| 262 | tai_unix( &(t.sec), (g_now_seconds + OT_CLIENT_TIMEOUT) ); | 274 | tai_unix(&(t.sec), (g_now_seconds + OT_CLIENT_TIMEOUT)); |
| 263 | io_timeout( sock, t ); | 275 | io_timeout(sock, t); |
| 264 | } | 276 | } |
| 265 | io_eagain(serversocket); | 277 | io_eagain(serversocket); |
| 266 | } | 278 | } |
| 267 | 279 | ||
| 268 | static void * server_mainloop( void * args ) { | 280 | static void *server_mainloop(void *args) { |
| 269 | struct ot_workstruct ws; | 281 | struct ot_workstruct ws; |
| 270 | time_t next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL; | 282 | time_t next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL; |
| 271 | struct iovec *iovector; | 283 | struct iovec *iovector; |
| 272 | int iovec_entries; | 284 | int iovec_entries, is_partial; |
| 273 | 285 | ||
| 274 | (void)args; | 286 | (void)args; |
| 275 | 287 | ||
| 276 | /* Initialize our "thread local storage" */ | 288 | /* Initialize our "thread local storage" */ |
| 277 | ws.inbuf = malloc( G_INBUF_SIZE ); | 289 | ws.inbuf = malloc(G_INBUF_SIZE); |
| 278 | ws.outbuf = malloc( G_OUTBUF_SIZE ); | 290 | ws.outbuf = malloc(G_OUTBUF_SIZE); |
| 279 | #ifdef _DEBUG_HTTPERROR | 291 | #ifdef _DEBUG_HTTPERROR |
| 280 | ws.debugbuf= malloc( G_DEBUGBUF_SIZE ); | 292 | ws.debugbuf = malloc(G_DEBUGBUF_SIZE); |
| 281 | #endif | 293 | #endif |
| 282 | 294 | ||
| 283 | if( !ws.inbuf || !ws.outbuf ) | 295 | if (!ws.inbuf || !ws.outbuf) |
| 284 | panic( "Initializing worker failed" ); | 296 | panic("Initializing worker failed"); |
| 285 | 297 | ||
| 286 | #ifdef WANT_ARC4RANDOM | 298 | #ifdef WANT_ARC4RANDOM |
| 287 | arc4random_buf(&ws.rand48_state[0], 3 * sizeof(uint16_t)); | 299 | arc4random_buf(&ws.rand48_state[0], 3 * sizeof(uint16_t)); |
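In the hunk above, handle_dead() and handle_write() treat sockets flagged STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER specially: once every queued io_batch has been sent, the socket is not closed; write interest is dropped with io_dontwantwrite() until the next partial result arrives via mutex_workqueue_popresult(). For reference, a minimal, self-contained sketch of the HTTP/1.1 chunked framing such a transfer uses on the wire; this is generic HTTP, not the encoder from ot_http.c (which is outside this excerpt).

    #include <stdio.h>
    #include <string.h>

    /* Wrap `len` payload bytes into one chunk: "<hex-size>\r\n<data>\r\n". */
    static size_t chunk_frame(char *out, const char *data, size_t len) {
      size_t off = (size_t)sprintf(out, "%zx\r\n", len);
      memcpy(out + off, data, len);
      memcpy(out + off + len, "\r\n", 2);
      return off + len + 2;
    }

    int main(void) {
      const char payload[] = "first chunk of a long reply";
      char       buf[128];
      size_t     n = chunk_frame(buf, payload, strlen(payload));

      fwrite(buf, 1, n, stdout);  /* one data chunk */
      fputs("0\r\n\r\n", stdout); /* terminating zero-length chunk */
      return 0;
    }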
| @@ -291,323 +303,332 @@ static void * server_mainloop( void * args ) { | |||
| 291 | ws.rand48_state[2] = (uint16_t)random(); | 303 | ws.rand48_state[2] = (uint16_t)random(); |
| 292 | #endif | 304 | #endif |
| 293 | 305 | ||
| 294 | for( ; ; ) { | 306 | for (;;) { |
| 295 | int64 sock; | 307 | int64 sock; |
| 296 | 308 | ||
| 297 | io_wait(); | 309 | io_wait(); |
| 298 | 310 | ||
| 299 | while( ( sock = io_canread( ) ) != -1 ) { | 311 | while ((sock = io_canread()) != -1) { |
| 300 | const void *cookie = io_getcookie( sock ); | 312 | const void *cookie = io_getcookie(sock); |
| 301 | if( (intptr_t)cookie == FLAG_TCP ) | 313 | if ((intptr_t)cookie == FLAG_TCP) |
| 302 | handle_accept( sock ); | 314 | handle_accept(sock); |
| 303 | else if( (intptr_t)cookie == FLAG_UDP ) | 315 | else if ((intptr_t)cookie == FLAG_UDP) |
| 304 | handle_udp6( sock, &ws ); | 316 | handle_udp6(sock, &ws); |
| 305 | else if( (intptr_t)cookie == FLAG_SELFPIPE ) | 317 | else if ((intptr_t)cookie == FLAG_SELFPIPE) |
| 306 | io_tryread( sock, ws.inbuf, G_INBUF_SIZE ); | 318 | io_tryread(sock, ws.inbuf, G_INBUF_SIZE); |
| 307 | else | 319 | else |
| 308 | handle_read( sock, &ws ); | 320 | handle_read(sock, &ws); |
| 309 | } | 321 | } |
| 310 | 322 | ||
| 311 | while( ( sock = mutex_workqueue_popresult( &iovec_entries, &iovector ) ) != -1 ) | 323 | while ((sock = mutex_workqueue_popresult(&iovec_entries, &iovector, &is_partial)) != -1) |
| 312 | http_sendiovecdata( sock, &ws, iovec_entries, iovector ); | 324 | http_sendiovecdata(sock, &ws, iovec_entries, iovector, is_partial); |
| 313 | 325 | ||
| 314 | while( ( sock = io_canwrite( ) ) != -1 ) | 326 | while ((sock = io_canwrite()) != -1) |
| 315 | handle_write( sock ); | 327 | handle_write(sock); |
| 316 | 328 | ||
| 317 | if( g_now_seconds > next_timeout_check ) { | 329 | if (g_now_seconds > next_timeout_check) { |
| 318 | while( ( sock = io_timeouted() ) != -1 ) | 330 | while ((sock = io_timeouted()) != -1) |
| 319 | handle_dead( sock ); | 331 | handle_dead(sock); |
| 320 | next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL; | 332 | next_timeout_check = g_now_seconds + OT_CLIENT_TIMEOUT_CHECKINTERVAL; |
| 321 | } | 333 | } |
| 322 | 334 | ||
| 323 | livesync_ticker(); | 335 | livesync_ticker(); |
| 324 | |||
| 325 | /* Enforce setting the clock */ | ||
| 326 | signal_handler( SIGALRM ); | ||
| 327 | } | 336 | } |
| 328 | return 0; | 337 | return 0; |
| 329 | } | 338 | } |
| 330 | 339 | ||
| 331 | static int64_t ot_try_bind( ot_ip6 ip, uint16_t port, PROTO_FLAG proto ) { | 340 | static int64_t ot_try_bind(ot_ip6 ip, uint16_t port, PROTO_FLAG proto) { |
| 332 | int64 sock = proto == FLAG_TCP ? socket_tcp6( ) : socket_udp6( ); | 341 | int64 sock = proto == FLAG_TCP ? socket_tcp6() : socket_udp6(); |
| 333 | |||
| 334 | #ifndef WANT_V6 | ||
| 335 | if( !ip6_isv4mapped(ip) ) { | ||
| 336 | exerr( "V4 Tracker is V4 only!" ); | ||
| 337 | } | ||
| 338 | #else | ||
| 339 | if( ip6_isv4mapped(ip) ) { | ||
| 340 | exerr( "V6 Tracker is V6 only!" ); | ||
| 341 | } | ||
| 342 | #endif | ||
| 343 | 342 | ||
| 344 | #ifdef _DEBUG | 343 | #ifdef _DEBUG |
| 345 | { | 344 | { |
| 346 | char *protos[] = {"TCP","UDP","UDP mcast"}; | 345 | char *protos[] = {"TCP", "UDP", "UDP mcast"}; |
| 347 | char _debug[512]; | 346 | char _debug[512]; |
| 348 | int off = snprintf( _debug, sizeof(_debug), "Binding socket type %s to address [", protos[proto] ); | 347 | int off = snprintf(_debug, sizeof(_debug), "Binding socket type %s to address [", protos[proto]); |
| 349 | off += fmt_ip6c( _debug+off, ip); | 348 | off += fmt_ip6c(_debug + off, ip); |
| 350 | snprintf( _debug + off, sizeof(_debug)-off, "]:%d...", port); | 349 | snprintf(_debug + off, sizeof(_debug) - off, "]:%d...", port); |
| 351 | fputs( _debug, stderr ); | 350 | fputs(_debug, stderr); |
| 352 | } | 351 | } |
| 353 | #endif | 352 | #endif |
| 354 | 353 | ||
| 355 | if( socket_bind6_reuse( sock, ip, port, 0 ) == -1 ) | 354 | if (socket_bind6_reuse(sock, ip, port, 0) == -1) |
| 356 | panic( "socket_bind6_reuse" ); | 355 | panic("socket_bind6_reuse"); |
| 357 | 356 | ||
| 358 | if( ( proto == FLAG_TCP ) && ( socket_listen( sock, SOMAXCONN) == -1 ) ) | 357 | if ((proto == FLAG_TCP) && (socket_listen(sock, SOMAXCONN) == -1)) |
| 359 | panic( "socket_listen" ); | 358 | panic("socket_listen"); |
| 360 | 359 | ||
| 361 | if( !io_fd( sock ) ) | 360 | if (!io_fd(sock)) |
| 362 | panic( "io_fd" ); | 361 | panic("io_fd"); |
| 363 | 362 | ||
| 364 | io_setcookie( sock, (void*)proto ); | 363 | io_setcookie(sock, (void *)proto); |
| 365 | 364 | ||
| 366 | if( (proto == FLAG_UDP) && g_udp_workers ) { | 365 | if ((proto == FLAG_UDP) && g_udp_workers) { |
| 367 | io_block( sock ); | 366 | io_block(sock); |
| 368 | udp_init( sock, g_udp_workers ); | 367 | udp_init(sock, g_udp_workers); |
| 369 | } else | 368 | } else |
| 370 | io_wantread( sock ); | 369 | io_wantread(sock); |
| 371 | 370 | ||
| 372 | #ifdef _DEBUG | 371 | #ifdef _DEBUG |
| 373 | fputs( " success.\n", stderr); | 372 | fputs(" success.\n", stderr); |
| 374 | #endif | 373 | #endif |
| 375 | 374 | ||
| 376 | return sock; | 375 | return sock; |
| 377 | } | 376 | } |
| 378 | 377 | ||
| 379 | char * set_config_option( char **option, char *value ) { | 378 | char *set_config_option(char **option, char *value) { |
| 380 | #ifdef _DEBUG | 379 | #ifdef _DEBUG |
| 381 | fprintf( stderr, "Setting config option: %s\n", value ); | 380 | fprintf(stderr, "Setting config option: %s\n", value); |
| 382 | #endif | 381 | #endif |
| 383 | while( isspace(*value) ) ++value; | 382 | while (isspace(*value)) |
| 384 | free( *option ); | 383 | ++value; |
| 385 | return *option = strdup( value ); | 384 | free(*option); |
| 385 | return *option = strdup(value); | ||
| 386 | } | 386 | } |
| 387 | 387 | ||
| 388 | static int scan_ip6_port( const char *src, ot_ip6 ip, uint16 *port ) { | 388 | static int scan_ip6_port(const char *src, ot_ip6 ip, uint16 *port) { |
| 389 | const char *s = src; | 389 | const char *s = src; |
| 390 | int off, bracket = 0; | 390 | int off, bracket = 0; |
| 391 | while( isspace(*s) ) ++s; | 391 | while (isspace(*s)) |
| 392 | if( *s == '[' ) ++s, ++bracket; /* for v6 style notation */ | 392 | ++s; |
| 393 | if( !(off = scan_ip6( s, ip ) ) ) | 393 | if (*s == '[') |
| 394 | ++s, ++bracket; /* for v6 style notation */ | ||
| 395 | if (!(off = scan_ip6(s, ip))) | ||
| 394 | return 0; | 396 | return 0; |
| 395 | s += off; | 397 | s += off; |
| 396 | if( bracket && *s == ']' ) ++s; | 398 | if (bracket && *s == ']') |
| 397 | if( *s == 0 || isspace(*s)) return s-src; | 399 | ++s; |
| 398 | if( !ip6_isv4mapped(ip)) { | 400 | if (*s == 0 || isspace(*s)) |
| 399 | if( *s != ':' && *s != '.' ) return 0; | 401 | return s - src; |
| 400 | if( !bracket && *(s) == ':' ) return 0; | 402 | if (!ip6_isv4mapped(ip)) { |
| 403 | if (*s != ':' && *s != '.') | ||
| 404 | return 0; | ||
| 405 | if (!bracket && *(s) == ':') | ||
| 406 | return 0; | ||
| 401 | s++; | 407 | s++; |
| 402 | } else { | 408 | } else { |
| 403 | if( *(s++) != ':' ) return 0; | 409 | if (*(s++) != ':') |
| 410 | return 0; | ||
| 404 | } | 411 | } |
| 405 | if( !(off = scan_ushort (s, port ) ) ) | 412 | if (!(off = scan_ushort(s, port))) |
| 406 | return 0; | 413 | return 0; |
| 407 | return off+s-src; | 414 | return off + s - src; |
| 408 | } | 415 | } |
| 409 | 416 | ||
| 410 | static int scan_ip6_net( const char *src, ot_net *net) { | 417 | static int scan_ip6_net(const char *src, ot_net *net) { |
| 411 | const char *s = src; | 418 | const char *s = src; |
| 412 | int off; | 419 | int off; |
| 413 | while( isspace(*s) ) ++s; | 420 | while (isspace(*s)) |
| 414 | if( !(off = scan_ip6( s, net->address ) ) ) | 421 | ++s; |
| 422 | if (!(off = scan_ip6(s, net->address))) | ||
| 415 | return 0; | 423 | return 0; |
| 416 | s += off; | 424 | s += off; |
| 417 | if(*s!='/') | 425 | if (*s != '/') |
| 418 | net->bits = 128; | 426 | net->bits = 128; |
| 419 | else { | 427 | else { |
| 420 | s++; | 428 | s++; |
| 421 | if( !(off = scan_int (s, &net->bits ) ) ) | 429 | if (!(off = scan_int(s, &net->bits))) |
| 422 | return 0; | 430 | return 0; |
| 423 | if( ip6_isv4mapped(net->address)) | 431 | if (ip6_isv4mapped(net->address)) |
| 424 | net->bits += 96; | 432 | net->bits += 96; |
| 425 | if(net->bits > 128) | 433 | if (net->bits > 128) |
| 426 | return 0; | 434 | return 0; |
| 427 | s += off; | 435 | s += off; |
| 428 | } | 436 | } |
| 429 | return off+s-src; | 437 | return off + s - src; |
| 430 | } | 438 | } |
| 431 | 439 | ||
| 432 | int parse_configfile( char * config_filename ) { | 440 | int parse_configfile(char *config_filename) { |
| 433 | FILE * accesslist_filehandle; | 441 | FILE *accesslist_filehandle; |
| 434 | char inbuf[512]; | 442 | char inbuf[512]; |
| 435 | ot_ip6 tmpip; | 443 | ot_ip6 tmpip; |
| 436 | #if defined(WANT_RESTRICT_STATS) || defined(WANT_IP_FROM_PROXY) || defined(WANT_SYNC_LIVE) | 444 | #if defined(WANT_RESTRICT_STATS) || defined(WANT_IP_FROM_PROXY) || defined(WANT_SYNC_LIVE) |
| 437 | ot_net tmpnet; | 445 | ot_net tmpnet; |
| 438 | #endif | 446 | #endif |
| 439 | int bound = 0; | 447 | int bound = 0; |
| 440 | 448 | ||
| 441 | accesslist_filehandle = fopen( config_filename, "r" ); | 449 | accesslist_filehandle = fopen(config_filename, "r"); |
| 442 | 450 | ||
| 443 | if( accesslist_filehandle == NULL ) { | 451 | if (accesslist_filehandle == NULL) { |
| 444 | fprintf( stderr, "Warning: Can't open config file: %s.", config_filename ); | 452 | fprintf(stderr, "Warning: Can't open config file: %s.", config_filename); |
| 445 | return 0; | 453 | return 0; |
| 446 | } | 454 | } |
| 447 | 455 | ||
| 448 | while( fgets( inbuf, sizeof(inbuf), accesslist_filehandle ) ) { | 456 | while (fgets(inbuf, sizeof(inbuf), accesslist_filehandle)) { |
| 449 | char *p = inbuf; | 457 | char *p = inbuf; |
| 450 | size_t strl; | 458 | size_t strl; |
| 451 | 459 | ||
| 452 | /* Skip white spaces */ | 460 | /* Skip white spaces */ |
| 453 | while(isspace(*p)) ++p; | 461 | while (isspace(*p)) |
| 462 | ++p; | ||
| 454 | 463 | ||
| 455 | /* Ignore comments and empty lines */ | 464 | /* Ignore comments and empty lines */ |
| 456 | if((*p=='#')||(*p=='\n')||(*p==0)) continue; | 465 | if ((*p == '#') || (*p == '\n') || (*p == 0)) |
| 466 | continue; | ||
| 457 | 467 | ||
| 458 | /* consume trailing new lines and spaces */ | 468 | /* consume trailing new lines and spaces */ |
| 459 | strl = strlen(p); | 469 | strl = strlen(p); |
| 460 | while( strl && isspace(p[strl-1])) | 470 | while (strl && isspace(p[strl - 1])) |
| 461 | p[--strl] = 0; | 471 | p[--strl] = 0; |
| 462 | 472 | ||
| 463 | /* Scan for commands */ | 473 | /* Scan for commands */ |
| 464 | if(!byte_diff(p,15,"tracker.rootdir" ) && isspace(p[15])) { | 474 | if (!byte_diff(p, 15, "tracker.rootdir") && isspace(p[15])) { |
| 465 | set_config_option( &g_serverdir, p+16 ); | 475 | set_config_option(&g_serverdir, p + 16); |
| 466 | } else if(!byte_diff(p,12,"tracker.user" ) && isspace(p[12])) { | 476 | } else if (!byte_diff(p, 12, "tracker.user") && isspace(p[12])) { |
| 467 | set_config_option( &g_serveruser, p+13 ); | 477 | set_config_option(&g_serveruser, p + 13); |
| 468 | } else if(!byte_diff(p,14,"listen.tcp_udp" ) && isspace(p[14])) { | 478 | } else if (!byte_diff(p, 14, "listen.tcp_udp") && isspace(p[14])) { |
| 469 | uint16_t tmpport = 6969; | 479 | uint16_t tmpport = 6969; |
| 470 | if( !scan_ip6_port( p+15, tmpip, &tmpport )) goto parse_error; | 480 | if (!scan_ip6_port(p + 15, tmpip, &tmpport)) |
| 471 | ot_try_bind( tmpip, tmpport, FLAG_TCP ); ++bound; | 481 | goto parse_error; |
| 472 | ot_try_bind( tmpip, tmpport, FLAG_UDP ); ++bound; | 482 | ot_try_bind(tmpip, tmpport, FLAG_TCP); |
| 473 | } else if(!byte_diff(p,10,"listen.tcp" ) && isspace(p[10])) { | 483 | ++bound; |
| 484 | ot_try_bind(tmpip, tmpport, FLAG_UDP); | ||
| 485 | ++bound; | ||
| 486 | } else if (!byte_diff(p, 10, "listen.tcp") && isspace(p[10])) { | ||
| 474 | uint16_t tmpport = 6969; | 487 | uint16_t tmpport = 6969; |
| 475 | if( !scan_ip6_port( p+11, tmpip, &tmpport )) goto parse_error; | 488 | if (!scan_ip6_port(p + 11, tmpip, &tmpport)) |
| 476 | ot_try_bind( tmpip, tmpport, FLAG_TCP ); | 489 | goto parse_error; |
| 490 | ot_try_bind(tmpip, tmpport, FLAG_TCP); | ||
| 477 | ++bound; | 491 | ++bound; |
| 478 | } else if(!byte_diff(p, 10, "listen.udp" ) && isspace(p[10])) { | 492 | } else if (!byte_diff(p, 10, "listen.udp") && isspace(p[10])) { |
| 479 | uint16_t tmpport = 6969; | 493 | uint16_t tmpport = 6969; |
| 480 | if( !scan_ip6_port( p+11, tmpip, &tmpport )) goto parse_error; | 494 | if (!scan_ip6_port(p + 11, tmpip, &tmpport)) |
| 481 | ot_try_bind( tmpip, tmpport, FLAG_UDP ); | 495 | goto parse_error; |
| 496 | ot_try_bind(tmpip, tmpport, FLAG_UDP); | ||
| 482 | ++bound; | 497 | ++bound; |
| 483 | } else if(!byte_diff(p,18,"listen.udp.workers" ) && isspace(p[18])) { | 498 | } else if (!byte_diff(p, 18, "listen.udp.workers") && isspace(p[18])) { |
| 484 | char *value = p + 18; | 499 | char *value = p + 18; |
| 485 | while( isspace(*value) ) ++value; | 500 | while (isspace(*value)) |
| 486 | scan_uint( value, &g_udp_workers ); | 501 | ++value; |
| 502 | scan_uint(value, &g_udp_workers); | ||
| 487 | #ifdef WANT_ACCESSLIST_WHITE | 503 | #ifdef WANT_ACCESSLIST_WHITE |
| 488 | } else if(!byte_diff(p, 16, "access.whitelist" ) && isspace(p[16])) { | 504 | } else if (!byte_diff(p, 16, "access.whitelist") && isspace(p[16])) { |
| 489 | set_config_option( &g_accesslist_filename, p+17 ); | 505 | set_config_option(&g_accesslist_filename, p + 17); |
| 490 | #elif defined( WANT_ACCESSLIST_BLACK ) | 506 | #elif defined(WANT_ACCESSLIST_BLACK) |
| 491 | } else if(!byte_diff(p, 16, "access.blacklist" ) && isspace(p[16])) { | 507 | } else if (!byte_diff(p, 16, "access.blacklist") && isspace(p[16])) { |
| 492 | set_config_option( &g_accesslist_filename, p+17 ); | 508 | set_config_option(&g_accesslist_filename, p + 17); |
| 493 | #endif | 509 | #endif |
| 494 | #ifdef WANT_DYNAMIC_ACCESSLIST | 510 | #ifdef WANT_DYNAMIC_ACCESSLIST |
| 495 | } else if(!byte_diff(p, 15, "access.fifo_add" ) && isspace(p[15])) { | 511 | } else if (!byte_diff(p, 15, "access.fifo_add") && isspace(p[15])) { |
| 496 | set_config_option( &g_accesslist_pipe_add, p+16 ); | 512 | set_config_option(&g_accesslist_pipe_add, p + 16); |
| 497 | } else if(!byte_diff(p, 18, "access.fifo_delete" ) && isspace(p[18])) { | 513 | } else if (!byte_diff(p, 18, "access.fifo_delete") && isspace(p[18])) { |
| 498 | set_config_option( &g_accesslist_pipe_delete, p+19 ); | 514 | set_config_option(&g_accesslist_pipe_delete, p + 19); |
| 499 | #endif | 515 | #endif |
| 500 | #ifdef WANT_RESTRICT_STATS | 516 | #ifdef WANT_RESTRICT_STATS |
| 501 | } else if(!byte_diff(p, 12, "access.stats" ) && isspace(p[12])) { | 517 | } else if (!byte_diff(p, 12, "access.stats") && isspace(p[12])) { |
| 502 | if( !scan_ip6_net( p+13, &tmpnet )) goto parse_error; | 518 | if (!scan_ip6_net(p + 13, &tmpnet)) |
| 503 | accesslist_bless_net( &tmpnet, OT_PERMISSION_MAY_STAT ); | 519 | goto parse_error; |
| 520 | accesslist_bless_net(&tmpnet, OT_PERMISSION_MAY_STAT); | ||
| 504 | #endif | 521 | #endif |
| 505 | } else if(!byte_diff(p, 17, "access.stats_path" ) && isspace(p[17])) { | 522 | } else if (!byte_diff(p, 17, "access.stats_path") && isspace(p[17])) { |
| 506 | set_config_option( &g_stats_path, p+18 ); | 523 | set_config_option(&g_stats_path, p + 18); |
| 507 | #ifdef WANT_IP_FROM_PROXY | 524 | #ifdef WANT_IP_FROM_PROXY |
| 508 | } else if(!byte_diff(p, 12, "access.proxy" ) && isspace(p[12])) { | 525 | } else if (!byte_diff(p, 12, "access.proxy") && isspace(p[12])) { |
| 509 | if( !scan_ip6_net( p+13, &tmpnet )) goto parse_error; | 526 | if (!scan_ip6_net(p + 13, &tmpnet)) |
| 510 | accesslist_bless_net( &tmpnet, OT_PERMISSION_MAY_PROXY ); | 527 | goto parse_error; |
| 528 | accesslist_bless_net(&tmpnet, OT_PERMISSION_MAY_PROXY); | ||
| 511 | #endif | 529 | #endif |
| 512 | } else if(!byte_diff(p, 20, "tracker.redirect_url" ) && isspace(p[20])) { | 530 | } else if (!byte_diff(p, 20, "tracker.redirect_url") && isspace(p[20])) { |
| 513 | set_config_option( &g_redirecturl, p+21 ); | 531 | set_config_option(&g_redirecturl, p + 21); |
| 514 | #ifdef WANT_SYNC_LIVE | 532 | #ifdef WANT_SYNC_LIVE |
| 515 | } else if(!byte_diff(p, 24, "livesync.cluster.node_ip" ) && isspace(p[24])) { | 533 | } else if (!byte_diff(p, 24, "livesync.cluster.node_ip") && isspace(p[24])) { |
| 516 | if( !scan_ip6_net( p+25, &tmpnet )) goto parse_error; | 534 | if (!scan_ip6_net(p + 25, &tmpnet)) |
| 517 | accesslist_bless_net( &tmpnet, OT_PERMISSION_MAY_LIVESYNC ); | 535 | goto parse_error; |
| 518 | } else if(!byte_diff(p, 23, "livesync.cluster.listen" ) && isspace(p[23])) { | 536 | accesslist_bless_net(&tmpnet, OT_PERMISSION_MAY_LIVESYNC); |
| 537 | } else if (!byte_diff(p, 23, "livesync.cluster.listen") && isspace(p[23])) { | ||
| 519 | uint16_t tmpport = LIVESYNC_PORT; | 538 | uint16_t tmpport = LIVESYNC_PORT; |
| 520 | if( !scan_ip6_port( p+24, tmpip, &tmpport )) goto parse_error; | 539 | if (!scan_ip6_port(p + 24, tmpip, &tmpport)) |
| 521 | livesync_bind_mcast( tmpip, tmpport ); | 540 | goto parse_error; |
| 541 | livesync_bind_mcast(tmpip, tmpport); | ||
| 522 | #endif | 542 | #endif |
| 523 | } else | 543 | } else |
| 524 | fprintf( stderr, "Unhandled line in config file: %s\n", inbuf ); | 544 | fprintf(stderr, "Unhandled line in config file: %s\n", inbuf); |
| 525 | continue; | 545 | continue; |
| 526 | parse_error: | 546 | parse_error: |
| 527 | fprintf( stderr, "Parse error in config file: %s\n", inbuf); | 547 | fprintf(stderr, "Parse error in config file: %s\n", inbuf); |
| 528 | } | 548 | } |
| 529 | fclose( accesslist_filehandle ); | 549 | fclose(accesslist_filehandle); |
| 530 | return bound; | 550 | return bound; |
| 531 | } | 551 | } |
| 532 | 552 | ||
| 533 | void load_state(const char * const state_filename ) { | 553 | void load_state(const char *const state_filename) { |
| 534 | FILE * state_filehandle; | 554 | FILE *state_filehandle; |
| 535 | char inbuf[512]; | 555 | char inbuf[512]; |
| 536 | ot_hash infohash; | 556 | ot_hash infohash; |
| 537 | unsigned long long base, downcount; | 557 | unsigned long long base, downcount; |
| 538 | int consumed; | 558 | int consumed; |
| 539 | 559 | ||
| 540 | state_filehandle = fopen( state_filename, "r" ); | 560 | state_filehandle = fopen(state_filename, "r"); |
| 541 | 561 | ||
| 542 | if( state_filehandle == NULL ) { | 562 | if (state_filehandle == NULL) { |
| 543 | fprintf( stderr, "Warning: Can't open config file: %s.", state_filename ); | 563 | fprintf(stderr, "Warning: Can't open config file: %s.", state_filename); |
| 544 | return; | 564 | return; |
| 545 | } | 565 | } |
| 546 | 566 | ||
| 547 | /* We do ignore anything that is not of the form "^[:xdigit:]:\d+:\d+" */ | 567 | /* We do ignore anything that is not of the form "^[:xdigit:]:\d+:\d+" */ |
| 548 | while( fgets( inbuf, sizeof(inbuf), state_filehandle ) ) { | 568 | while (fgets(inbuf, sizeof(inbuf), state_filehandle)) { |
| 549 | int i; | 569 | int i; |
| 550 | for( i=0; i<(int)sizeof(ot_hash); ++i ) { | 570 | for (i = 0; i < (int)sizeof(ot_hash); ++i) { |
| 551 | int eger = 16 * scan_fromhex( inbuf[ 2*i ] ) + scan_fromhex( inbuf[ 1 + 2*i ] ); | 571 | int eger = 16 * scan_fromhex(inbuf[2 * i]) + scan_fromhex(inbuf[1 + 2 * i]); |
| 552 | if( eger < 0 ) | 572 | if (eger < 0) |
| 553 | continue; | 573 | continue; |
| 554 | infohash[i] = eger; | 574 | infohash[i] = eger; |
| 555 | } | 575 | } |
| 556 | 576 | ||
| 557 | if( i != (int)sizeof(ot_hash) ) continue; | 577 | if (i != (int)sizeof(ot_hash)) |
| 578 | continue; | ||
| 558 | i *= 2; | 579 | i *= 2; |
| 559 | 580 | ||
| 560 | if( inbuf[ i++ ] != ':' || !( consumed = scan_ulonglong( inbuf+i, &base ) ) ) continue; | 581 | if (inbuf[i++] != ':' || !(consumed = scan_ulonglong(inbuf + i, &base))) |
| 582 | continue; | ||
| 561 | i += consumed; | 583 | i += consumed; |
| 562 | if( inbuf[ i++ ] != ':' || !( consumed = scan_ulonglong( inbuf+i, &downcount ) ) ) continue; | 584 | if (inbuf[i++] != ':' || !(consumed = scan_ulonglong(inbuf + i, &downcount))) |
| 563 | add_torrent_from_saved_state( infohash, base, downcount ); | 585 | continue; |
| 586 | add_torrent_from_saved_state(infohash, base, downcount); | ||
| 564 | } | 587 | } |
| 565 | 588 | ||
| 566 | fclose( state_filehandle ); | 589 | fclose(state_filehandle); |
| 567 | } | 590 | } |
| 568 | 591 | ||
| 569 | int drop_privileges ( const char * const serveruser, const char * const serverdir ) { | 592 | int drop_privileges(const char *const serveruser, const char *const serverdir) { |
| 570 | struct passwd *pws = NULL; | 593 | struct passwd *pws = NULL; |
| 571 | 594 | ||
| 572 | #ifdef _DEBUG | 595 | #ifdef _DEBUG |
| 573 | if( !geteuid() ) | 596 | if (!geteuid()) |
| 574 | fprintf( stderr, "Dropping to user %s.\n", serveruser ); | 597 | fprintf(stderr, "Dropping to user %s.\n", serveruser); |
| 575 | if( serverdir ) | 598 | if (serverdir) |
| 576 | fprintf( stderr, "ch%s'ing to directory %s.\n", geteuid() ? "dir" : "root", serverdir ); | 599 | fprintf(stderr, "ch%s'ing to directory %s.\n", geteuid() ? "dir" : "root", serverdir); |
| 577 | #endif | 600 | #endif |
| 578 | 601 | ||
| 579 | /* Grab pws entry before chrooting */ | 602 | /* Grab pws entry before chrooting */ |
| 580 | pws = getpwnam( serveruser ); | 603 | pws = getpwnam(serveruser); |
| 581 | endpwent(); | 604 | endpwent(); |
| 582 | 605 | ||
| 583 | if( geteuid() == 0 ) { | 606 | if (geteuid() == 0) { |
| 584 | /* Running as root: chroot and drop privileges */ | 607 | /* Running as root: chroot and drop privileges */ |
| 585 | if( serverdir && chroot( serverdir ) ) { | 608 | if (serverdir && chroot(serverdir)) { |
| 586 | fprintf( stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno) ); | 609 | fprintf(stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno)); |
| 587 | return -1; | 610 | return -1; |
| 588 | } | 611 | } |
| 589 | 612 | ||
| 590 | if(chdir("/")) | 613 | if (chdir("/")) |
| 591 | panic("chdir() failed after chrooting: "); | 614 | panic("chdir() failed after chrooting: "); |
| 592 | 615 | ||
| 593 | /* If we can't find server user, revert to nobody's default uid */ | 616 | /* If we can't find server user, revert to nobody's default uid */ |
| 594 | if( !pws ) { | 617 | if (!pws) { |
| 595 | fprintf( stderr, "Warning: Could not get password entry for %s. Reverting to uid -2.\n", serveruser ); | 618 | fprintf(stderr, "Warning: Could not get password entry for %s. Reverting to uid -2.\n", serveruser); |
| 596 | if (setegid( (gid_t)-2 ) || setgid( (gid_t)-2 ) || setuid( (uid_t)-2 ) || seteuid( (uid_t)-2 )) | 619 | if (setegid((gid_t)-2) || setgid((gid_t)-2) || setuid((uid_t)-2) || seteuid((uid_t)-2)) |
| 597 | panic("Could not set uid to value -2"); | 620 | panic("Could not set uid to value -2"); |
| 598 | } | 621 | } else { |
| 599 | else { | 622 | if (setegid(pws->pw_gid) || setgid(pws->pw_gid) || setuid(pws->pw_uid) || seteuid(pws->pw_uid)) |
| 600 | if (setegid( pws->pw_gid ) || setgid( pws->pw_gid ) || setuid( pws->pw_uid ) || seteuid( pws->pw_uid )) | ||
| 601 | panic("Could not set uid to specified value"); | 623 | panic("Could not set uid to specified value"); |
| 602 | } | 624 | } |
| 603 | 625 | ||
| 604 | if( geteuid() == 0 || getegid() == 0 ) | 626 | if (geteuid() == 0 || getegid() == 0) |
| 605 | panic("Still running with root privileges?!"); | 627 | panic("Still running with root privileges?!"); |
| 606 | } | 628 | } else { |
| 607 | else { | ||
| 608 | /* Normal user, just chdir() */ | 629 | /* Normal user, just chdir() */ |
| 609 | if( serverdir && chdir( serverdir ) ) { | 630 | if (serverdir && chdir(serverdir)) { |
| 610 | fprintf( stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno) ); | 631 | fprintf(stderr, "Could not chroot to %s, because: %s\n", serverdir, strerror(errno)); |
| 611 | return -1; | 632 | return -1; |
| 612 | } | 633 | } |
| 613 | } | 634 | } |
| @@ -615,119 +636,173 @@ int drop_privileges ( const char * const serveruser, const char * const serverdi | |||
| 615 | return 0; | 636 | return 0; |
| 616 | } | 637 | } |
| 617 | 638 | ||
| 618 | int main( int argc, char **argv ) { | 639 | /* Maintain our copy of the clock. time() on BSDs is very expensive. */ |
| 619 | ot_ip6 serverip; | 640 | static void *time_caching_worker(void *args) { |
| 620 | ot_net tmpnet; | 641 | (void)args; |
| 621 | int bound = 0, scanon = 1; | 642 | while (1) { |
| 622 | uint16_t tmpport; | 643 | g_now_seconds = time(NULL); |
| 623 | char * statefile = 0; | 644 | sleep(5); |
| 624 | 645 | } | |
| 625 | memset( serverip, 0, sizeof(ot_ip6) ); | 646 | return NULL; |
| 626 | #ifndef WANT_V6 | 647 | } |
| 627 | serverip[10]=serverip[11]=-1; | 648 | |
| 628 | noipv6=1; | 649 | int main(int argc, char **argv) { |
| 650 | ot_ip6 serverip; | ||
| 651 | ot_net tmpnet; | ||
| 652 | int bound = 0, scanon = 1; | ||
| 653 | uint16_t tmpport; | ||
| 654 | char *statefile = 0; | ||
| 655 | pthread_t thread_id; /* time cacher */ | ||
| 656 | |||
| 657 | memset(serverip, 0, sizeof(ot_ip6)); | ||
| 658 | #ifdef WANT_V4_ONLY | ||
| 659 | serverip[10] = serverip[11] = -1; | ||
| 629 | #endif | 660 | #endif |
| 630 | 661 | ||
| 631 | #ifdef WANT_DEV_RANDOM | 662 | #ifdef WANT_DEV_RANDOM |
| 632 | srandomdev(); | 663 | srandomdev(); |
| 633 | #else | 664 | #else |
| 634 | srandom( time(NULL) ); | 665 | srandom(time(NULL)); |
| 635 | #endif | 666 | #endif |
| 636 | 667 | ||
| 637 | while( scanon ) { | 668 | while (scanon) { |
| 638 | switch( getopt( argc, argv, ":i:p:A:P:d:u:r:s:f:l:v" | 669 | switch (getopt(argc, argv, |
| 670 | ":i:p:A:P:d:u:r:s:f:l:v" | ||
| 639 | #ifdef WANT_ACCESSLIST_BLACK | 671 | #ifdef WANT_ACCESSLIST_BLACK |
| 640 | "b:" | 672 | "b:" |
| 641 | #elif defined( WANT_ACCESSLIST_WHITE ) | 673 | #elif defined(WANT_ACCESSLIST_WHITE) |
| 642 | "w:" | 674 | "w:" |
| 643 | #endif | 675 | #endif |
| 644 | "h" ) ) { | 676 | "h")) { |
| 645 | case -1 : scanon = 0; break; | 677 | case -1: |
| 646 | case 'i': | 678 | scanon = 0; |
| 647 | if( !scan_ip6( optarg, serverip )) { usage( argv[0] ); exit( 1 ); } | 679 | break; |
| 648 | break; | 680 | case 'i': |
| 681 | if (!scan_ip6(optarg, serverip)) { | ||
| 682 | usage(argv[0]); | ||
| 683 | exit(1); | ||
| 684 | } | ||
| 685 | break; | ||
| 649 | #ifdef WANT_ACCESSLIST_BLACK | 686 | #ifdef WANT_ACCESSLIST_BLACK |
| 650 | case 'b': set_config_option( &g_accesslist_filename, optarg); break; | 687 | case 'b': |
| 651 | #elif defined( WANT_ACCESSLIST_WHITE ) | 688 | set_config_option(&g_accesslist_filename, optarg); |
| 652 | case 'w': set_config_option( &g_accesslist_filename, optarg); break; | 689 | break; |
| 690 | #elif defined(WANT_ACCESSLIST_WHITE) | ||
| 691 | case 'w': | ||
| 692 | set_config_option(&g_accesslist_filename, optarg); | ||
| 693 | break; | ||
| 653 | #endif | 694 | #endif |
| 654 | case 'p': | 695 | case 'p': |
| 655 | if( !scan_ushort( optarg, &tmpport)) { usage( argv[0] ); exit( 1 ); } | 696 | if (!scan_ushort(optarg, &tmpport)) { |
| 656 | ot_try_bind( serverip, tmpport, FLAG_TCP ); bound++; break; | 697 | usage(argv[0]); |
| 657 | case 'P': | 698 | exit(1); |
| 658 | if( !scan_ushort( optarg, &tmpport)) { usage( argv[0] ); exit( 1 ); } | 699 | } |
| 659 | ot_try_bind( serverip, tmpport, FLAG_UDP ); bound++; break; | 700 | ot_try_bind(serverip, tmpport, FLAG_TCP); |
| 701 | bound++; | ||
| 702 | break; | ||
| 703 | case 'P': | ||
| 704 | if (!scan_ushort(optarg, &tmpport)) { | ||
| 705 | usage(argv[0]); | ||
| 706 | exit(1); | ||
| 707 | } | ||
| 708 | ot_try_bind(serverip, tmpport, FLAG_UDP); | ||
| 709 | bound++; | ||
| 710 | break; | ||
| 660 | #ifdef WANT_SYNC_LIVE | 711 | #ifdef WANT_SYNC_LIVE |
| 661 | case 's': | 712 | case 's': |
| 662 | if( !scan_ushort( optarg, &tmpport)) { usage( argv[0] ); exit( 1 ); } | 713 | if (!scan_ushort(optarg, &tmpport)) { |
| 663 | livesync_bind_mcast( serverip, tmpport); break; | 714 | usage(argv[0]); |
| 715 | exit(1); | ||
| 716 | } | ||
| 717 | livesync_bind_mcast(serverip, tmpport); | ||
| 718 | break; | ||
| 664 | #endif | 719 | #endif |
| 665 | case 'd': set_config_option( &g_serverdir, optarg ); break; | 720 | case 'd': |
| 666 | case 'u': set_config_option( &g_serveruser, optarg ); break; | 721 | set_config_option(&g_serverdir, optarg); |
| 667 | case 'r': set_config_option( &g_redirecturl, optarg ); break; | 722 | break; |
| 668 | case 'l': statefile = optarg; break; | 723 | case 'u': |
| 669 | case 'A': | 724 | set_config_option(&g_serveruser, optarg); |
| 670 | if( !scan_ip6_net( optarg, &tmpnet )) { usage( argv[0] ); exit( 1 ); } | 725 | break; |
| 671 | accesslist_bless_net( &tmpnet, 0xffff ); /* Allow everything for now */ | 726 | case 'r': |
| 672 | break; | 727 | set_config_option(&g_redirecturl, optarg); |
| 673 | case 'f': bound += parse_configfile( optarg ); break; | 728 | break; |
| 674 | case 'h': help( argv[0] ); exit( 0 ); | 729 | case 'l': |
| 675 | case 'v': { | 730 | statefile = optarg; |
| 676 | char buffer[8192]; | 731 | break; |
| 677 | stats_return_tracker_version( buffer ); | 732 | case 'A': |
| 678 | fputs( buffer, stderr ); | 733 | if (!scan_ip6_net(optarg, &tmpnet)) { |
| 679 | exit( 0 ); | 734 | usage(argv[0]); |
| 735 | exit(1); | ||
| 680 | } | 736 | } |
| 681 | default: | 737 | accesslist_bless_net(&tmpnet, 0xffff); /* Allow everything for now */ |
| 682 | case '?': usage( argv[0] ); exit( 1 ); | 738 | break; |
| 739 | case 'f': | ||
| 740 | bound += parse_configfile(optarg); | ||
| 741 | break; | ||
| 742 | case 'h': | ||
| 743 | help(argv[0]); | ||
| 744 | exit(0); | ||
| 745 | case 'v': { | ||
| 746 | char buffer[8192]; | ||
| 747 | stats_return_tracker_version(buffer); | ||
| 748 | fputs(buffer, stderr); | ||
| 749 | exit(0); | ||
| 750 | } | ||
| 751 | default: | ||
| 752 | case '?': | ||
| 753 | usage(argv[0]); | ||
| 754 | exit(1); | ||
| 683 | } | 755 | } |
| 684 | } | 756 | } |
| 685 | 757 | ||
| 686 | /* Bind to our default tcp/udp ports */ | 758 | /* Bind to our default tcp/udp ports */ |
| 687 | if( !bound) { | 759 | if (!bound) { |
| 688 | ot_try_bind( serverip, 6969, FLAG_TCP ); | 760 | ot_try_bind(serverip, 6969, FLAG_TCP); |
| 689 | ot_try_bind( serverip, 6969, FLAG_UDP ); | 761 | ot_try_bind(serverip, 6969, FLAG_UDP); |
| 690 | } | 762 | } |
| 691 | 763 | ||
| 764 | defaul_signal_handlers(); | ||
| 765 | |||
| 692 | #ifdef WANT_SYSLOGS | 766 | #ifdef WANT_SYSLOGS |
| 693 | openlog( "opentracker", 0, LOG_USER ); | 767 | openlog("opentracker", 0, LOG_USER); |
| 694 | setlogmask(LOG_UPTO(LOG_INFO)); | 768 | setlogmask(LOG_UPTO(LOG_INFO)); |
| 695 | #endif | 769 | #endif |
| 696 | 770 | ||
| 697 | if( drop_privileges( g_serveruser ? g_serveruser : "nobody", g_serverdir ) == -1 ) | 771 | if (drop_privileges(g_serveruser ? g_serveruser : "nobody", g_serverdir) == -1) |
| 698 | panic( "drop_privileges failed, exiting. Last error"); | 772 | panic("drop_privileges failed, exiting. Last error"); |
| 699 | 773 | ||
| 700 | g_now_seconds = time( NULL ); | 774 | g_now_seconds = time(NULL); |
| 775 | pthread_create(&thread_id, NULL, time_caching_worker, NULL); | ||
| 701 | 776 | ||
| 702 | /* Create our self pipe which allows us to interrupt mainloops | 777 | /* Create our self pipe which allows us to interrupt mainloops |
| 703 | io_wait in case some data is available to send out */ | 778 | io_wait in case some data is available to send out */ |
| 704 | if( pipe( g_self_pipe ) == -1 ) | 779 | if (pipe(g_self_pipe) == -1) |
| 705 | panic( "selfpipe failed: " ); | 780 | panic("selfpipe failed: "); |
| 706 | if( !io_fd( g_self_pipe[0] ) ) | 781 | if (!io_fd(g_self_pipe[0])) |
| 707 | panic( "selfpipe io_fd failed: " ); | 782 | panic("selfpipe io_fd failed: "); |
| 708 | if( !io_fd( g_self_pipe[1] ) ) | 783 | if (!io_fd(g_self_pipe[1])) |
| 709 | panic( "selfpipe io_fd failed: " ); | 784 | panic("selfpipe io_fd failed: "); |
| 710 | io_setcookie( g_self_pipe[0], (void*)FLAG_SELFPIPE ); | 785 | io_setcookie(g_self_pipe[0], (void *)FLAG_SELFPIPE); |
| 711 | io_wantread( g_self_pipe[0] ); | 786 | io_wantread(g_self_pipe[0]); |
| 712 | 787 | ||
| 713 | defaul_signal_handlers( ); | ||
| 714 | /* Init all sub systems. This call may fail with an exit() */ | 788 | /* Init all sub systems. This call may fail with an exit() */ |
| 715 | trackerlogic_init( ); | 789 | trackerlogic_init(); |
| 716 | 790 | ||
| 717 | if( statefile ) | 791 | #ifdef _DEBUG_RANDOMTORRENTS |
| 718 | load_state( statefile ); | 792 | fprintf(stderr, "DEBUG: Generating %d random peers on random torrents. This may take a while. (Setting RANDOMTORRENTS in trackerlogic.h)\n", RANDOMTORRENTS); |
| 793 | trackerlogic_add_random_torrents(RANDOMTORRENTS); | ||
| 794 | fprintf(stderr, "... done.\n"); | ||
| 795 | #endif | ||
| 719 | 796 | ||
| 720 | install_signal_handlers( ); | 797 | if (statefile) |
| 798 | load_state(statefile); | ||
| 721 | 799 | ||
| 722 | if( !g_udp_workers ) | 800 | install_signal_handlers(); |
| 723 | udp_init( -1, 0 ); | ||
| 724 | 801 | ||
| 725 | /* Kick off our initial clock setting alarm */ | 802 | if (!g_udp_workers) |
| 726 | alarm(5); | 803 | udp_init(-1, 0); |
| 727 | 804 | ||
| 728 | server_mainloop( 0 ); | 805 | server_mainloop(0); |
| 729 | 806 | ||
| 730 | return 0; | 807 | return 0; |
| 731 | } | 808 | } |
| 732 | |||
| 733 | const char *g_version_opentracker_c = "$Source$: $Revision$\n"; | ||
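The main() hunk above keeps the self-pipe that lets other threads interrupt the io_wait()-based main loop whenever data has been queued for sending (the libowfat calls io_fd, io_setcookie and io_wantread are visible in the diff). What follows is only a minimal, generic sketch of that wake-up idea using plain pipe()/poll(); the names wake_pipe and wakeup_mainloop are invented for the illustration and do not exist in opentracker:

  #include <poll.h>
  #include <unistd.h>

  static int wake_pipe[2];

  /* Any thread can call this to make the blocked poll() below return. */
  static void wakeup_mainloop(void) {
    char one = 1;
    (void)write(wake_pipe[1], &one, 1);
  }

  int main(void) {
    struct pollfd pfd;
    char          scratch[64];

    if (pipe(wake_pipe) == -1)
      return 1;

    pfd.fd     = wake_pipe[0];
    pfd.events = POLLIN;

    wakeup_mainloop(); /* demonstrate a single wake-up */

    for (;;) {
      /* In a real server this poll would also cover the listening sockets. */
      if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
        /* Drain the pipe, then handle whatever work was queued. */
        while (read(wake_pipe[0], scratch, sizeof(scratch)) == (ssize_t)sizeof(scratch))
          ;
        break; /* a real main loop would keep serving here */
      }
    }
    return 0;
  }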
diff --git a/opentracker.conf.sample b/opentracker.conf.sample index d44f3d4..054e405 100644 --- a/opentracker.conf.sample +++ b/opentracker.conf.sample | |||
| @@ -2,7 +2,7 @@ | |||
| 2 | # | 2 | # |
| 3 | 3 | ||
| 4 | # I) Address opentracker will listen on, using both, tcp AND udp family | 4 | # I) Address opentracker will listen on, using both, tcp AND udp family |
| 5 | # (note, that port 6969 is implicite if ommitted). | 5 | # (note, that port 6969 is implicit if omitted). |
| 6 | # | 6 | # |
| 7 | # If no listen option is given (here or on the command line), opentracker | 7 | # If no listen option is given (here or on the command line), opentracker |
| 8 | # listens on 0.0.0.0:6969 tcp and udp. | 8 | # listens on 0.0.0.0:6969 tcp and udp. |
| @@ -83,9 +83,10 @@ | |||
| 83 | # IIb) | 83 | # IIb) |
| 84 | # If you do not want to grant anyone access to your stats, enable the | 84 | # If you do not want to grant anyone access to your stats, enable the |
| 85 | # WANT_RESTRICT_STATS option in Makefile and bless the ip addresses | 85 | # WANT_RESTRICT_STATS option in Makefile and bless the ip addresses |
| 86 | # allowed to fetch stats here. | 86 | # or networks allowed to fetch stats here. |
| 87 | # | 87 | # |
| 88 | # access.stats 192.168.0.23 | 88 | # access.stats 192.168.0.23 |
| 89 | # access.stats 10.1.1.23 | ||
| 89 | # | 90 | # |
| 90 | # There is another way of hiding your stats. You can obfuscate the path | 91 | # There is another way of hiding your stats. You can obfuscate the path |
| 91 | # to them. Normally it is located at /stats but you can configure it to | 92 | # to them. Normally it is located at /stats but you can configure it to |
| @@ -93,14 +94,14 @@ | |||
| 93 | # | 94 | # |
| 94 | # access.stats_path stats | 95 | # access.stats_path stats |
| 95 | # | 96 | # |
| 96 | # IIc) | 97 | # IIc) |
| 97 | # If opentracker lives behind one or multiple reverse proxies, you can | 98 | # If opentracker lives behind one or multiple reverse proxies, |
| 98 | # every http connection appears to come from these proxies. In order to | 99 | # every http connection appears to come from these proxies. In order to |
| 99 | # take the X-Forwarded-For address instead, compile opentracker with the | 100 | # take the X-Forwarded-For address instead, compile opentracker with the |
| 100 | # WANT_IP_FROM_PROXY option and set your proxy addresses here. | 101 | # WANT_IP_FROM_PROXY option and set your proxy addresses or networks here. |
| 101 | # | 102 | # |
| 102 | # access.proxy 10.0.1.23 | 103 | # access.proxy 10.0.1.23 |
| 103 | # access.proxy 10.0.1.24 | 104 | # access.proxy 192.0.0.0/8 |
| 104 | # | 105 | # |
| 105 | 106 | ||
| 106 | # III) Live sync uses udp multicast packets to keep a cluster of opentrackers | 107 | # III) Live sync uses udp multicast packets to keep a cluster of opentrackers |
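Both access.stats and access.proxy now accept networks as well as single addresses: parse_configfile() reads them with scan_ip6_net() (which adds 96 to the bit count for v4-mapped prefixes), and peers are later matched against them by address_in_net() in ot_accesslist.c below. As a rough illustration of the masking involved, here is a small, self-contained IPv4-only sketch; in_prefix() is an invented helper for this example, not an opentracker function, which performs essentially the same whole-byte memcmp plus partial-byte mask as address_in_net():

  #include <stdint.h>
  #include <stdio.h>
  #include <string.h>

  /* Illustration only: does addr lie inside net/bits? opentracker runs the
     equivalent check on 16-byte v4-mapped IPv6 addresses. */
  static int in_prefix(const uint8_t addr[4], const uint8_t net[4], int bits) {
    if (memcmp(addr, net, bits / 8))
      return 0;
    if (bits % 8) {
      uint8_t mask = (uint8_t)(0xff << (8 - bits % 8));
      return (addr[bits / 8] & mask) == (net[bits / 8] & mask);
    }
    return 1;
  }

  int main(void) {
    uint8_t peer[4] = {192, 168, 0, 23}, net[4] = {192, 0, 0, 0};
    /* access.proxy 192.0.0.0/8 from the sample config covers 192.168.0.23 ... */
    printf("%d\n", in_prefix(peer, net, 8));
    /* ... but a /16 rooted at 192.0.0.0 does not. */
    printf("%d\n", in_prefix(peer, net, 16));
    return 0;
  }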
diff --git a/ot_accesslist.c b/ot_accesslist.c index 181c8f0..4b88c40 100644 --- a/ot_accesslist.c +++ b/ot_accesslist.c | |||
| @@ -5,35 +5,35 @@ | |||
| 5 | 5 | ||
| 6 | /* System */ | 6 | /* System */ |
| 7 | #include <pthread.h> | 7 | #include <pthread.h> |
| 8 | #include <signal.h> | ||
| 9 | #include <stdio.h> | ||
| 8 | #include <stdlib.h> | 10 | #include <stdlib.h> |
| 9 | #include <string.h> | 11 | #include <string.h> |
| 10 | #include <stdio.h> | ||
| 11 | #include <signal.h> | ||
| 12 | #include <unistd.h> | 12 | #include <unistd.h> |
| 13 | #ifdef WANT_DYNAMIC_ACCESSLIST | 13 | #ifdef WANT_DYNAMIC_ACCESSLIST |
| 14 | #include <sys/types.h> | ||
| 15 | #include <sys/stat.h> | ||
| 16 | #include <errno.h> | 14 | #include <errno.h> |
| 15 | #include <sys/stat.h> | ||
| 16 | #include <sys/types.h> | ||
| 17 | #endif | 17 | #endif |
| 18 | 18 | ||
| 19 | /* Libowfat */ | 19 | /* Libowfat */ |
| 20 | #include "byte.h" | 20 | #include "byte.h" |
| 21 | #include "scan.h" | 21 | #include "fmt.h" |
| 22 | #include "ip6.h" | 22 | #include "ip6.h" |
| 23 | #include "mmap.h" | 23 | #include "mmap.h" |
| 24 | #include "fmt.h" | 24 | #include "scan.h" |
| 25 | 25 | ||
| 26 | /* Opentracker */ | 26 | /* Opentracker */ |
| 27 | #include "trackerlogic.h" | ||
| 28 | #include "ot_accesslist.h" | 27 | #include "ot_accesslist.h" |
| 29 | #include "ot_vector.h" | 28 | #include "ot_vector.h" |
| 29 | #include "trackerlogic.h" | ||
| 30 | 30 | ||
| 31 | /* GLOBAL VARIABLES */ | 31 | /* GLOBAL VARIABLES */ |
| 32 | #ifdef WANT_ACCESSLIST | 32 | #ifdef WANT_ACCESSLIST |
| 33 | char *g_accesslist_filename = NULL; | 33 | char *g_accesslist_filename = NULL; |
| 34 | #ifdef WANT_DYNAMIC_ACCESSLIST | 34 | #ifdef WANT_DYNAMIC_ACCESSLIST |
| 35 | char *g_accesslist_pipe_add = NULL; | 35 | char *g_accesslist_pipe_add = NULL; |
| 36 | char *g_accesslist_pipe_delete = NULL; | 36 | char *g_accesslist_pipe_delete = NULL; |
| 37 | #endif | 37 | #endif |
| 38 | static pthread_mutex_t g_accesslist_mutex; | 38 | static pthread_mutex_t g_accesslist_mutex; |
| 39 | 39 | ||
| @@ -55,20 +55,18 @@ struct ot_accesslist { | |||
| 55 | ot_time base; | 55 | ot_time base; |
| 56 | ot_accesslist *next; | 56 | ot_accesslist *next; |
| 57 | }; | 57 | }; |
| 58 | static ot_accesslist * _Atomic g_accesslist = NULL; | 58 | static ot_accesslist *_Atomic g_accesslist = NULL; |
| 59 | #ifdef WANT_DYNAMIC_ACCESSLIST | 59 | #ifdef WANT_DYNAMIC_ACCESSLIST |
| 60 | static ot_accesslist * _Atomic g_accesslist_add = NULL; | 60 | static ot_accesslist *_Atomic g_accesslist_add = NULL; |
| 61 | static ot_accesslist * _Atomic g_accesslist_delete = NULL; | 61 | static ot_accesslist *_Atomic g_accesslist_delete = NULL; |
| 62 | #endif | 62 | #endif |
| 63 | 63 | ||
| 64 | /* Helpers to work on access lists */ | 64 | /* Helpers to work on access lists */ |
| 65 | static int vector_compare_hash(const void *hash1, const void *hash2 ) { | 65 | static int vector_compare_hash(const void *hash1, const void *hash2) { return memcmp(hash1, hash2, OT_HASH_COMPARE_SIZE); } |
| 66 | return memcmp( hash1, hash2, OT_HASH_COMPARE_SIZE ); | ||
| 67 | } | ||
| 68 | 66 | ||
| 69 | static ot_accesslist * accesslist_free(ot_accesslist *accesslist) { | 67 | static ot_accesslist *accesslist_free(ot_accesslist *accesslist) { |
| 70 | while (accesslist) { | 68 | while (accesslist) { |
| 71 | ot_accesslist * this_accesslist = accesslist; | 69 | ot_accesslist *this_accesslist = accesslist; |
| 72 | accesslist = this_accesslist->next; | 70 | accesslist = this_accesslist->next; |
| 73 | free(this_accesslist->list); | 71 | free(this_accesslist->list); |
| 74 | free(this_accesslist); | 72 | free(this_accesslist); |
| @@ -76,8 +74,8 @@ static ot_accesslist * accesslist_free(ot_accesslist *accesslist) { | |||
| 76 | return NULL; | 74 | return NULL; |
| 77 | } | 75 | } |
| 78 | 76 | ||
| 79 | static ot_accesslist * accesslist_make(ot_accesslist *next, size_t size) { | 77 | static ot_accesslist *accesslist_make(ot_accesslist *next, size_t size) { |
| 80 | ot_accesslist * accesslist_new = malloc(sizeof(ot_accesslist)); | 78 | ot_accesslist *accesslist_new = malloc(sizeof(ot_accesslist)); |
| 81 | if (accesslist_new) { | 79 | if (accesslist_new) { |
| 82 | accesslist_new->list = size ? malloc(sizeof(ot_hash) * size) : NULL; | 80 | accesslist_new->list = size ? malloc(sizeof(ot_hash) * size) : NULL; |
| 83 | accesslist_new->size = size; | 81 | accesslist_new->size = size; |
| @@ -102,76 +100,77 @@ static void accesslist_clean(ot_accesslist *accesslist) { | |||
| 102 | } | 100 | } |
| 103 | 101 | ||
| 104 | /* Read initial access list */ | 102 | /* Read initial access list */ |
| 105 | static void accesslist_readfile( void ) { | 103 | static void accesslist_readfile(void) { |
| 106 | ot_accesslist * accesslist_new; | 104 | ot_accesslist *accesslist_new; |
| 107 | ot_hash *info_hash; | 105 | ot_hash *info_hash; |
| 108 | const char *map, *map_end, *read_offs; | 106 | const char *map, *map_end, *read_offs; |
| 109 | size_t maplen; | 107 | size_t maplen; |
| 110 | 108 | ||
| 111 | if( ( map = mmap_read( g_accesslist_filename, &maplen ) ) == NULL ) { | 109 | if ((map = mmap_read(g_accesslist_filename, &maplen)) == NULL) { |
| 112 | char *wd = getcwd( NULL, 0 ); | 110 | char *wd = getcwd(NULL, 0); |
| 113 | fprintf( stderr, "Warning: Can't open accesslist file: %s (but will try to create it later, if necessary and possible).\nPWD: %s\n", g_accesslist_filename, wd ); | 111 | fprintf(stderr, "Warning: Can't open accesslist file: %s (but will try to create it later, if necessary and possible).\nPWD: %s\n", g_accesslist_filename, wd); |
| 114 | free( wd ); | 112 | free(wd); |
| 115 | return; | 113 | return; |
| 116 | } | 114 | } |
| 117 | 115 | ||
| 118 | /* You need at least 41 bytes to pass an info_hash, make enough room | 116 | /* You need at least 41 bytes to pass an info_hash, make enough room |
| 119 | for the maximum amount of them */ | 117 | for the maximum amount of them */ |
| 120 | accesslist_new = accesslist_make(g_accesslist, maplen / 41); | 118 | accesslist_new = accesslist_make(g_accesslist, maplen / 41); |
| 121 | if( !accesslist_new ) { | 119 | if (!accesslist_new) { |
| 122 | fprintf( stderr, "Warning: Not enough memory to allocate %zd bytes for accesslist buffer. May succeed later.\n", ( maplen / 41 ) * 20 ); | 120 | fprintf(stderr, "Warning: Not enough memory to allocate %zd bytes for accesslist buffer. May succeed later.\n", (maplen / 41) * 20); |
| 123 | mmap_unmap( map, maplen); | 121 | mmap_unmap(map, maplen); |
| 124 | return; | 122 | return; |
| 125 | } | 123 | } |
| 126 | info_hash = accesslist_new->list; | 124 | info_hash = accesslist_new->list; |
| 127 | 125 | ||
| 128 | /* No use to scan if there's not enough room for another full info_hash */ | 126 | /* No use to scan if there's not enough room for another full info_hash */ |
| 129 | map_end = map + maplen - 40; | 127 | map_end = map + maplen - 40; |
| 130 | read_offs = map; | 128 | read_offs = map; |
| 131 | 129 | ||
| 132 | /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*" */ | 130 | /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*" */ |
| 133 | while( read_offs <= map_end ) { | 131 | while (read_offs <= map_end) { |
| 134 | int i; | 132 | int i; |
| 135 | for( i=0; i<(int)sizeof(ot_hash); ++i ) { | 133 | for (i = 0; i < (int)sizeof(ot_hash); ++i) { |
| 136 | int eger1 = scan_fromhex( (unsigned char)read_offs[ 2*i ] ); | 134 | int eger1 = scan_fromhex((unsigned char)read_offs[2 * i]); |
| 137 | int eger2 = scan_fromhex( (unsigned char)read_offs[ 1 + 2*i ] ); | 135 | int eger2 = scan_fromhex((unsigned char)read_offs[1 + 2 * i]); |
| 138 | if( eger1 < 0 || eger2 < 0 ) | 136 | if (eger1 < 0 || eger2 < 0) |
| 139 | break; | 137 | break; |
| 140 | (*info_hash)[i] = (uint8_t)(eger1 * 16 + eger2); | 138 | (*info_hash)[i] = (uint8_t)(eger1 * 16 + eger2); |
| 141 | } | 139 | } |
| 142 | 140 | ||
| 143 | if( i == sizeof(ot_hash) ) { | 141 | if (i == sizeof(ot_hash)) { |
| 144 | read_offs += 40; | 142 | read_offs += 40; |
| 145 | 143 | ||
| 146 | /* Append accesslist to accesslist vector */ | 144 | /* Append accesslist to accesslist vector */ |
| 147 | if( read_offs == map_end || scan_fromhex( (unsigned char)*read_offs ) < 0 ) | 145 | if (read_offs == map_end || scan_fromhex((unsigned char)*read_offs) < 0) |
| 148 | ++info_hash; | 146 | ++info_hash; |
| 149 | } | 147 | } |
| 150 | 148 | ||
| 151 | /* Find start of next line */ | 149 | /* Find start of next line */ |
| 152 | while( read_offs <= map_end && *(read_offs++) != '\n' ); | 150 | while (read_offs <= map_end && *(read_offs++) != '\n') |
| 151 | ; | ||
| 153 | } | 152 | } |
| 154 | #ifdef _DEBUG | 153 | #ifdef _DEBUG |
| 155 | fprintf( stderr, "Added %zd info_hashes to accesslist\n", (size_t)(info_hash - accesslist_new->list) ); | 154 | fprintf(stderr, "Added %zd info_hashes to accesslist\n", (size_t)(info_hash - accesslist_new->list)); |
| 156 | #endif | 155 | #endif |
| 157 | 156 | ||
| 158 | mmap_unmap( map, maplen); | 157 | mmap_unmap(map, maplen); |
| 159 | 158 | ||
| 160 | qsort( accesslist_new->list, info_hash - accesslist_new->list, sizeof( *info_hash ), vector_compare_hash ); | 159 | qsort(accesslist_new->list, info_hash - accesslist_new->list, sizeof(*info_hash), vector_compare_hash); |
| 161 | accesslist_new->size = info_hash - accesslist_new->list; | 160 | accesslist_new->size = info_hash - accesslist_new->list; |
| 162 | 161 | ||
| 163 | /* Now exchange the accesslist vector in the least race condition prone way */ | 162 | /* Now exchange the accesslist vector in the least race condition prone way */ |
| 164 | pthread_mutex_lock(&g_accesslist_mutex); | 163 | pthread_mutex_lock(&g_accesslist_mutex); |
| 165 | accesslist_new->next = g_accesslist; | 164 | accesslist_new->next = g_accesslist; |
| 166 | g_accesslist = accesslist_new; /* Only now set a new list */ | 165 | g_accesslist = accesslist_new; /* Only now set a new list */ |
| 167 | 166 | ||
| 168 | #ifdef WANT_DYNAMIC_ACCESSLIST | 167 | #ifdef WANT_DYNAMIC_ACCESSLIST |
| 169 | /* If we have dynamic accesslists, reloading a new one will always void the add/delete lists. | 168 | /* If we have dynamic accesslists, reloading a new one will always void the add/delete lists. |
| 170 | Insert empty ones at the list head */ | 169 | Insert empty ones at the list head */ |
| 171 | if (g_accesslist_add && (accesslist_new = accesslist_make(g_accesslist_add, 0)) != NULL) | 170 | if (g_accesslist_add && (accesslist_new = accesslist_make(g_accesslist_add, 0)) != NULL) |
| 172 | g_accesslist_add = accesslist_new; | 171 | g_accesslist_add = accesslist_new; |
| 173 | if (g_accesslist_delete && (accesslist_new = accesslist_make(g_accesslist_delete, 0)) != NULL) | 172 | if (g_accesslist_delete && (accesslist_new = accesslist_make(g_accesslist_delete, 0)) != NULL) |
| 174 | g_accesslist_delete = accesslist_new; | 173 | g_accesslist_delete = accesslist_new; |
| 175 | #endif | 174 | #endif |
| 176 | 175 | ||
| 177 | accesslist_clean(g_accesslist); | 176 | accesslist_clean(g_accesslist); |
| @@ -179,26 +178,26 @@ static void accesslist_readfile( void ) { | |||
| 179 | pthread_mutex_unlock(&g_accesslist_mutex); | 178 | pthread_mutex_unlock(&g_accesslist_mutex); |
| 180 | } | 179 | } |
| 181 | 180 | ||
| 182 | int accesslist_hashisvalid( ot_hash hash ) { | 181 | int accesslist_hashisvalid(ot_hash hash) { |
| 183 | /* Get working copy of current access list */ | 182 | /* Get working copy of current access list */ |
| 184 | ot_accesslist * accesslist = g_accesslist; | 183 | ot_accesslist *accesslist = g_accesslist; |
| 185 | #ifdef WANT_DYNAMIC_ACCESSLIST | 184 | #ifdef WANT_DYNAMIC_ACCESSLIST |
| 186 | ot_accesslist * accesslist_add, * accesslist_delete; | 185 | ot_accesslist *accesslist_add, *accesslist_delete; |
| 187 | #endif | 186 | #endif |
| 188 | void * exactmatch = NULL; | 187 | void *exactmatch = NULL; |
| 189 | 188 | ||
| 190 | if (accesslist) | 189 | if (accesslist) |
| 191 | exactmatch = bsearch( hash, accesslist->list, accesslist->size, OT_HASH_COMPARE_SIZE, vector_compare_hash ); | 190 | exactmatch = bsearch(hash, accesslist->list, accesslist->size, OT_HASH_COMPARE_SIZE, vector_compare_hash); |
| 192 | 191 | ||
| 193 | #ifdef WANT_DYNAMIC_ACCESSLIST | 192 | #ifdef WANT_DYNAMIC_ACCESSLIST |
| 194 | /* If we had no match on the main list, scan the list of dynamically added hashes */ | 193 | /* If we had no match on the main list, scan the list of dynamically added hashes */ |
| 195 | accesslist_add = g_accesslist_add; | 194 | accesslist_add = g_accesslist_add; |
| 196 | if ((exactmatch == NULL) && accesslist_add) | 195 | if ((exactmatch == NULL) && accesslist_add) |
| 197 | exactmatch = bsearch( hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash ); | 196 | exactmatch = bsearch(hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash); |
| 198 | 197 | ||
| 199 | /* If we found a matching hash on the main list, scan the list of dynamically deleted hashes */ | 198 | /* If we found a matching hash on the main list, scan the list of dynamically deleted hashes */ |
| 200 | accesslist_delete = g_accesslist_delete; | 199 | accesslist_delete = g_accesslist_delete; |
| 201 | if ((exactmatch != NULL) && accesslist_delete && bsearch( hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash )) | 200 | if ((exactmatch != NULL) && accesslist_delete && bsearch(hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash)) |
| 202 | exactmatch = NULL; | 201 | exactmatch = NULL; |
| 203 | #endif | 202 | #endif |
| 204 | 203 | ||
| @@ -209,31 +208,32 @@ int accesslist_hashisvalid( ot_hash hash ) { | |||
| 209 | #endif | 208 | #endif |
| 210 | } | 209 | } |
| 211 | 210 | ||
| 212 | static void * accesslist_worker( void * args ) { | 211 | static void *accesslist_worker(void *args) { |
| 213 | int sig; | 212 | int sig; |
| 214 | sigset_t signal_mask; | 213 | sigset_t signal_mask; |
| 215 | 214 | ||
| 216 | sigemptyset(&signal_mask); | 215 | sigemptyset(&signal_mask); |
| 217 | sigaddset(&signal_mask, SIGHUP); | 216 | sigaddset(&signal_mask, SIGHUP); |
| 218 | 217 | ||
| 219 | (void)args; | 218 | (void)args; |
| 220 | 219 | ||
| 221 | while( 1 ) { | 220 | while (1) { |
| 222 | if (!g_opentracker_running) | 221 | if (!g_opentracker_running) |
| 223 | return NULL; | 222 | return NULL; |
| 224 | 223 | ||
| 225 | /* Initial attempt to read accesslist */ | 224 | /* Initial attempt to read accesslist */ |
| 226 | accesslist_readfile( ); | 225 | accesslist_readfile(); |
| 227 | 226 | ||
| 228 | /* Wait for signals */ | 227 | /* Wait for signals */ |
| 229 | while( sigwait (&signal_mask, &sig) != 0 && sig != SIGHUP ); | 228 | while (sigwait(&signal_mask, &sig) != 0 && sig != SIGHUP) |
| 229 | ; | ||
| 230 | } | 230 | } |
| 231 | return NULL; | 231 | return NULL; |
| 232 | } | 232 | } |
| 233 | 233 | ||
| 234 | #ifdef WANT_DYNAMIC_ACCESSLIST | 234 | #ifdef WANT_DYNAMIC_ACCESSLIST |
| 235 | static pthread_t thread_adder_id, thread_deleter_id; | 235 | static pthread_t thread_adder_id, thread_deleter_id; |
| 236 | static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic * adding_to, ot_accesslist * _Atomic * removing_from) { | 236 | static void *accesslist_adddel_worker(char *fifoname, ot_accesslist *_Atomic *adding_to, ot_accesslist *_Atomic *removing_from) { |
| 237 | struct stat st; | 237 | struct stat st; |
| 238 | 238 | ||
| 239 | if (!stat(fifoname, &st)) { | 239 | if (!stat(fifoname, &st)) { |
| @@ -250,9 +250,9 @@ static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic | |||
| 250 | } | 250 | } |
| 251 | 251 | ||
| 252 | while (g_opentracker_running) { | 252 | while (g_opentracker_running) { |
| 253 | FILE * fifo = fopen(fifoname, "r"); | 253 | FILE *fifo = fopen(fifoname, "r"); |
| 254 | char *line = NULL; | 254 | char *line = NULL; |
| 255 | size_t linecap = 0; | 255 | size_t linecap = 0; |
| 256 | ssize_t linelen; | 256 | ssize_t linelen; |
| 257 | 257 | ||
| 258 | if (!fifo) { | 258 | if (!fifo) { |
| @@ -262,7 +262,7 @@ static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic | |||
| 262 | 262 | ||
| 263 | while ((linelen = getline(&line, &linecap, fifo)) > 0) { | 263 | while ((linelen = getline(&line, &linecap, fifo)) > 0) { |
| 264 | ot_hash info_hash; | 264 | ot_hash info_hash; |
| 265 | int i; | 265 | int i; |
| 266 | 266 | ||
| 267 | printf("Got line %*s", (int)linelen, line); | 267 | printf("Got line %*s", (int)linelen, line); |
| 268 | /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*" | 268 | /* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*" |
| @@ -270,15 +270,15 @@ static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic | |||
| 270 | if (linelen < 41) | 270 | if (linelen < 41) |
| 271 | continue; | 271 | continue; |
| 272 | 272 | ||
| 273 | for( i=0; i<(int)sizeof(ot_hash); ++i ) { | 273 | for (i = 0; i < (int)sizeof(ot_hash); ++i) { |
| 274 | int eger1 = scan_fromhex( (unsigned char)line[ 2*i ] ); | 274 | int eger1 = scan_fromhex((unsigned char)line[2 * i]); |
| 275 | int eger2 = scan_fromhex( (unsigned char)line[ 1 + 2*i ] ); | 275 | int eger2 = scan_fromhex((unsigned char)line[1 + 2 * i]); |
| 276 | if( eger1 < 0 || eger2 < 0 ) | 276 | if (eger1 < 0 || eger2 < 0) |
| 277 | break; | 277 | break; |
| 278 | ((uint8_t*)info_hash)[i] = (uint8_t)(eger1 * 16 + eger2); | 278 | ((uint8_t *)info_hash)[i] = (uint8_t)(eger1 * 16 + eger2); |
| 279 | } | 279 | } |
| 280 | printf("parsed info_hash %20s\n", info_hash); | 280 | printf("parsed info_hash %20s\n", info_hash); |
| 281 | if( i != sizeof(ot_hash) ) | 281 | if (i != sizeof(ot_hash)) |
| 282 | continue; | 282 | continue; |
| 283 | 283 | ||
| 284 | /* From now on we modify g_accesslist_add and g_accesslist_delete, so prevent the | 284 | /* From now on we modify g_accesslist_add and g_accesslist_delete, so prevent the |
| @@ -287,10 +287,10 @@ printf("parsed info_hash %20s\n", info_hash); | |||
| 287 | 287 | ||
| 288 | /* If the info hash is in the removing_from list, create a new head without that entry */ | 288 | /* If the info hash is in the removing_from list, create a new head without that entry */ |
| 289 | if (*removing_from && (*removing_from)->list) { | 289 | if (*removing_from && (*removing_from)->list) { |
| 290 | ot_hash * exactmatch = bsearch( info_hash, (*removing_from)->list, (*removing_from)->size, OT_HASH_COMPARE_SIZE, vector_compare_hash ); | 290 | ot_hash *exactmatch = bsearch(info_hash, (*removing_from)->list, (*removing_from)->size, OT_HASH_COMPARE_SIZE, vector_compare_hash); |
| 291 | if (exactmatch) { | 291 | if (exactmatch) { |
| 292 | ptrdiff_t off = exactmatch - (*removing_from)->list; | 292 | ptrdiff_t off = exactmatch - (*removing_from)->list; |
| 293 | ot_accesslist * accesslist_new = accesslist_make(*removing_from, (*removing_from)->size - 1); | 293 | ot_accesslist *accesslist_new = accesslist_make(*removing_from, (*removing_from)->size - 1); |
| 294 | if (accesslist_new) { | 294 | if (accesslist_new) { |
| 295 | memcpy(accesslist_new->list, (*removing_from)->list, sizeof(ot_hash) * off); | 295 | memcpy(accesslist_new->list, (*removing_from)->list, sizeof(ot_hash) * off); |
| 296 | memcpy(accesslist_new->list + off, (*removing_from)->list + off + 1, (*removing_from)->size - off - 1); | 296 | memcpy(accesslist_new->list + off, (*removing_from)->list + off + 1, (*removing_from)->size - off - 1); |
| @@ -301,19 +301,19 @@ printf("parsed info_hash %20s\n", info_hash); | |||
| 301 | 301 | ||
| 302 | /* Simple case: there's no adding_to list yet, create one with one member */ | 302 | /* Simple case: there's no adding_to list yet, create one with one member */ |
| 303 | if (!*adding_to) { | 303 | if (!*adding_to) { |
| 304 | ot_accesslist * accesslist_new = accesslist_make(NULL, 1); | 304 | ot_accesslist *accesslist_new = accesslist_make(NULL, 1); |
| 305 | if (accesslist_new) { | 305 | if (accesslist_new) { |
| 306 | memcpy(accesslist_new->list, info_hash, sizeof(ot_hash)); | 306 | memcpy(accesslist_new->list, info_hash, sizeof(ot_hash)); |
| 307 | *adding_to = accesslist_new; | 307 | *adding_to = accesslist_new; |
| 308 | } | 308 | } |
| 309 | } else { | 309 | } else { |
| 310 | int exactmatch = 0; | 310 | int exactmatch = 0; |
| 311 | ot_hash * insert_point = binary_search( info_hash, (*adding_to)->list, (*adding_to)->size, OT_HASH_COMPARE_SIZE, sizeof(ot_hash), &exactmatch ); | 311 | ot_hash *insert_point = binary_search(info_hash, (*adding_to)->list, (*adding_to)->size, OT_HASH_COMPARE_SIZE, sizeof(ot_hash), &exactmatch); |
| 312 | 312 | ||
| 313 | /* Only if the info hash is not in the adding_to list, create a new head with that entry */ | 313 | /* Only if the info hash is not in the adding_to list, create a new head with that entry */ |
| 314 | if (!exactmatch) { | 314 | if (!exactmatch) { |
| 315 | ot_accesslist * accesslist_new = accesslist_make(*adding_to, (*adding_to)->size + 1); | 315 | ot_accesslist *accesslist_new = accesslist_make(*adding_to, (*adding_to)->size + 1); |
| 316 | ptrdiff_t off = insert_point - (*adding_to)->list; | 316 | ptrdiff_t off = insert_point - (*adding_to)->list; |
| 317 | if (accesslist_new) { | 317 | if (accesslist_new) { |
| 318 | memcpy(accesslist_new->list, (*adding_to)->list, sizeof(ot_hash) * off); | 318 | memcpy(accesslist_new->list, (*adding_to)->list, sizeof(ot_hash) * off); |
| 319 | memcpy(accesslist_new->list + off, info_hash, sizeof(info_hash)); | 319 | memcpy(accesslist_new->list + off, info_hash, sizeof(info_hash)); |
| @@ -331,29 +331,29 @@ printf("parsed info_hash %20s\n", info_hash); | |||
| 331 | return NULL; | 331 | return NULL; |
| 332 | } | 332 | } |
| 333 | 333 | ||
| 334 | static void * accesslist_adder_worker( void * args ) { | 334 | static void *accesslist_adder_worker(void *args) { |
| 335 | (void)args; | 335 | (void)args; |
| 336 | return accesslist_adddel_worker(g_accesslist_pipe_add, &g_accesslist_add, &g_accesslist_delete); | 336 | return accesslist_adddel_worker(g_accesslist_pipe_add, &g_accesslist_add, &g_accesslist_delete); |
| 337 | } | 337 | } |
| 338 | static void * accesslist_deleter_worker( void * args ) { | 338 | static void *accesslist_deleter_worker(void *args) { |
| 339 | (void)args; | 339 | (void)args; |
| 340 | return accesslist_adddel_worker(g_accesslist_pipe_delete, &g_accesslist_delete, &g_accesslist_add); | 340 | return accesslist_adddel_worker(g_accesslist_pipe_delete, &g_accesslist_delete, &g_accesslist_add); |
| 341 | } | 341 | } |
| 342 | #endif | 342 | #endif |
| 343 | 343 | ||
| 344 | static pthread_t thread_id; | 344 | static pthread_t thread_id; |
| 345 | void accesslist_init( ) { | 345 | void accesslist_init() { |
| 346 | pthread_mutex_init(&g_accesslist_mutex, NULL); | 346 | pthread_mutex_init(&g_accesslist_mutex, NULL); |
| 347 | pthread_create( &thread_id, NULL, accesslist_worker, NULL ); | 347 | pthread_create(&thread_id, NULL, accesslist_worker, NULL); |
| 348 | #ifdef WANT_DYNAMIC_ACCESSLIST | 348 | #ifdef WANT_DYNAMIC_ACCESSLIST |
| 349 | if (g_accesslist_pipe_add) | 349 | if (g_accesslist_pipe_add) |
| 350 | pthread_create( &thread_adder_id, NULL, accesslist_adder_worker, NULL ); | 350 | pthread_create(&thread_adder_id, NULL, accesslist_adder_worker, NULL); |
| 351 | if (g_accesslist_pipe_delete) | 351 | if (g_accesslist_pipe_delete) |
| 352 | pthread_create( &thread_deleter_id, NULL, accesslist_deleter_worker, NULL ); | 352 | pthread_create(&thread_deleter_id, NULL, accesslist_deleter_worker, NULL); |
| 353 | #endif | 353 | #endif |
| 354 | } | 354 | } |
| 355 | 355 | ||
| 356 | void accesslist_deinit( void ) { | 356 | void accesslist_deinit(void) { |
| 357 | /* Wake up sleeping worker */ | 357 | /* Wake up sleeping worker */ |
| 358 | pthread_kill(thread_id, SIGHUP); | 358 | pthread_kill(thread_id, SIGHUP); |
| 359 | 359 | ||
| @@ -362,16 +362,16 @@ void accesslist_deinit( void ) { | |||
| 362 | g_accesslist = accesslist_free(g_accesslist); | 362 | g_accesslist = accesslist_free(g_accesslist); |
| 363 | 363 | ||
| 364 | #ifdef WANT_DYNAMIC_ACCESSLIST | 364 | #ifdef WANT_DYNAMIC_ACCESSLIST |
| 365 | g_accesslist_add = accesslist_free(g_accesslist_add); | 365 | g_accesslist_add = accesslist_free(g_accesslist_add); |
| 366 | g_accesslist_delete = accesslist_free(g_accesslist_delete); | 366 | g_accesslist_delete = accesslist_free(g_accesslist_delete); |
| 367 | #endif | 367 | #endif |
| 368 | 368 | ||
| 369 | pthread_mutex_unlock(&g_accesslist_mutex); | 369 | pthread_mutex_unlock(&g_accesslist_mutex); |
| 370 | pthread_cancel( thread_id ); | 370 | pthread_cancel(thread_id); |
| 371 | pthread_mutex_destroy(&g_accesslist_mutex); | 371 | pthread_mutex_destroy(&g_accesslist_mutex); |
| 372 | } | 372 | } |
| 373 | 373 | ||
| 374 | void accesslist_cleanup( void ) { | 374 | void accesslist_cleanup(void) { |
| 375 | pthread_mutex_lock(&g_accesslist_mutex); | 375 | pthread_mutex_lock(&g_accesslist_mutex); |
| 376 | 376 | ||
| 377 | accesslist_clean(g_accesslist); | 377 | accesslist_clean(g_accesslist); |
| @@ -384,35 +384,34 @@ void accesslist_cleanup( void ) { | |||
| 384 | } | 384 | } |
| 385 | #endif | 385 | #endif |
| 386 | 386 | ||
| 387 | int address_in_net( const ot_ip6 address, const ot_net *net ) { | 387 | int address_in_net(const ot_ip6 address, const ot_net *net) { |
| 388 | int bits = net->bits; | 388 | int bits = net->bits, checkbits = (0x7f00 >> (bits & 7)); |
| 389 | int result = memcmp( address, &net->address, bits >> 3 ); | 389 | int result = memcmp(address, &net->address, bits >> 3); |
| 390 | if( !result && ( bits & 7 ) ) | 390 | if (!result && (bits & 7)) |
| 391 | result = ( ( 0x7f00 >> ( bits & 7 ) ) & address[bits>>3] ) - net->address[bits>>3]; | 391 | result = (checkbits & address[bits >> 3]) - (checkbits & net->address[bits >> 3]); |
| 392 | return result == 0; | 392 | return result == 0; |
| 393 | } | 393 | } |
| 394 | 394 | ||
| 395 | void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value, const size_t member_size ) { | 395 | void *set_value_for_net(const ot_net *net, ot_vector *vector, const void *value, const size_t member_size) { |
| 396 | size_t i; | 396 | size_t i; |
| 397 | int exactmatch; | 397 | int exactmatch; |
| 398 | 398 | ||
| 399 | /* Caller must have a concept of ot_net in it's member */ | 399 | /* Caller must have a concept of ot_net in it's member */ |
| 400 | if( member_size < sizeof(ot_net) ) | 400 | if (member_size < sizeof(ot_net)) |
| 401 | return 0; | 401 | return 0; |
| 402 | 402 | ||
| 403 | /* Check each net in vector for overlap */ | 403 | /* Check each net in vector for overlap */ |
| 404 | uint8_t *member = ((uint8_t*)vector->data); | 404 | uint8_t *member = ((uint8_t *)vector->data); |
| 405 | for( i=0; i<vector->size; ++i ) { | 405 | for (i = 0; i < vector->size; ++i) { |
| 406 | if( address_in_net( *(ot_ip6*)member, net ) || | 406 | if (address_in_net(*(ot_ip6 *)member, net) || address_in_net(net->address, (ot_net *)member)) |
| 407 | address_in_net( net->address, (ot_net*)member ) ) | ||
| 408 | return 0; | 407 | return 0; |
| 409 | member += member_size; | 408 | member += member_size; |
| 410 | } | 409 | } |
| 411 | 410 | ||
| 412 | member = vector_find_or_insert( vector, (void*)net, member_size, sizeof(ot_net), &exactmatch ); | 411 | member = vector_find_or_insert(vector, (void *)net, member_size, sizeof(ot_net), &exactmatch); |
| 413 | if( member ) { | 412 | if (member) { |
| 414 | memcpy( member, net, sizeof(ot_net)); | 413 | memcpy(member, net, sizeof(ot_net)); |
| 415 | memcpy( member + sizeof(ot_net), value, member_size - sizeof(ot_net)); | 414 | memcpy(member + sizeof(ot_net), value, member_size - sizeof(ot_net)); |
| 416 | } | 415 | } |
| 417 | 416 | ||
| 418 | return member; | 417 | return member; |
| @@ -420,43 +419,43 @@ void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value | |||
| 420 | 419 | ||
| 421 | /* Takes a vector filled with { ot_net net, uint8_t[x] value }; | 420 | /* Takes a vector filled with { ot_net net, uint8_t[x] value }; |
| 422 | Returns value associated with the net, or NULL if not found */ | 421 | Returns value associated with the net, or NULL if not found */ |
| 423 | void *get_value_for_net( const ot_ip6 address, const ot_vector *vector, const size_t member_size ) { | 422 | void *get_value_for_net(const ot_ip6 address, const ot_vector *vector, const size_t member_size) { |
| 424 | int exactmatch; | 423 | int exactmatch; |
| 425 | /* This binary search will return a pointer to the first non-containing network... */ | 424 | /* This binary search will return a pointer to the first non-containing network... */ |
| 426 | ot_net *net = binary_search( address, vector->data, vector->size, member_size, sizeof(ot_ip6), &exactmatch ); | 425 | ot_net *net = binary_search(address, vector->data, vector->size, member_size, sizeof(ot_ip6), &exactmatch); |
| 427 | if( !net ) | 426 | if (!net) |
| 428 | return NULL; | 427 | return NULL; |
| 429 | /* ... so we'll need to move back one step unless we've exactly hit the first address in network */ | 428 | /* ... so we'll need to move back one step unless we've exactly hit the first address in network */ |
| 430 | if( !exactmatch && ( (void*)net > vector->data ) ) | 429 | if (!exactmatch && ((void *)net > vector->data)) |
| 431 | --net; | 430 | --net; |
| 432 | if( !address_in_net( address, net ) ) | 431 | if (!address_in_net(address, net)) |
| 433 | return NULL; | 432 | return NULL; |
| 434 | return (void*)net; | 433 | return (void *)net; |
| 435 | } | 434 | } |
| 436 | 435 | ||
| 437 | #ifdef WANT_FULLLOG_NETWORKS | 436 | #ifdef WANT_FULLLOG_NETWORKS |
| 438 | static ot_vector g_lognets_list; | 437 | static ot_vector g_lognets_list; |
| 439 | ot_log *g_logchain_first, *g_logchain_last; | 438 | ot_log *g_logchain_first, *g_logchain_last; |
| 440 | |||
| 441 | static pthread_mutex_t g_lognets_list_mutex = PTHREAD_MUTEX_INITIALIZER; | 439 | static pthread_mutex_t g_lognets_list_mutex = PTHREAD_MUTEX_INITIALIZER; |
| 442 | void loglist_add_network( const ot_net *net ) { | 440 | |
| 441 | void loglist_add_network(const ot_net *net) { | ||
| 443 | pthread_mutex_lock(&g_lognets_list_mutex); | 442 | pthread_mutex_lock(&g_lognets_list_mutex); |
| 444 | set_value_for_net( net, &g_lognets_list, NULL, sizeof(ot_net)); | 443 | set_value_for_net(net, &g_lognets_list, NULL, sizeof(ot_net)); |
| 445 | pthread_mutex_unlock(&g_lognets_list_mutex); | 444 | pthread_mutex_unlock(&g_lognets_list_mutex); |
| 446 | } | 445 | } |
| 447 | 446 | ||
| 448 | void loglist_reset( ) { | 447 | void loglist_reset() { |
| 449 | pthread_mutex_lock(&g_lognets_list_mutex); | 448 | pthread_mutex_lock(&g_lognets_list_mutex); |
| 450 | free( g_lognets_list.data ); | 449 | free(g_lognets_list.data); |
| 451 | g_lognets_list.data = 0; | 450 | g_lognets_list.data = 0; |
| 452 | g_lognets_list.size = g_lognets_list.space = 0; | 451 | g_lognets_list.size = g_lognets_list.space = 0; |
| 453 | pthread_mutex_unlock(&g_lognets_list_mutex); | 452 | pthread_mutex_unlock(&g_lognets_list_mutex); |
| 454 | } | 453 | } |
| 455 | 454 | ||
| 456 | int loglist_check_address( const ot_ip6 address ) { | 455 | int loglist_check_address(const ot_ip6 address) { |
| 457 | int result; | 456 | int result; |
| 458 | pthread_mutex_lock(&g_lognets_list_mutex); | 457 | pthread_mutex_lock(&g_lognets_list_mutex); |
| 459 | result = ( NULL != get_value_for_net( address, &g_lognets_list, sizeof(ot_net)) ); | 458 | result = (NULL != get_value_for_net(address, &g_lognets_list, sizeof(ot_net))); |
| 460 | pthread_mutex_unlock(&g_lognets_list_mutex); | 459 | pthread_mutex_unlock(&g_lognets_list_mutex); |
| 461 | return result; | 460 | return result; |
| 462 | } | 461 | } |
| @@ -464,44 +463,44 @@ int loglist_check_address( const ot_ip6 address ) { | |||
| 464 | 463 | ||
| 465 | #ifdef WANT_IP_FROM_PROXY | 464 | #ifdef WANT_IP_FROM_PROXY |
| 466 | typedef struct { | 465 | typedef struct { |
| 467 | ot_net *proxy; | 466 | ot_net *proxy; |
| 468 | ot_vector networks; | 467 | ot_vector networks; |
| 469 | } ot_proxymap; | 468 | } ot_proxymap; |
| 470 | 469 | ||
| 471 | static ot_vector g_proxies_list; | 470 | static ot_vector g_proxies_list; |
| 472 | static pthread_mutex_t g_proxies_list_mutex = PTHREAD_MUTEX_INITIALIZER; | 471 | static pthread_mutex_t g_proxies_list_mutex = PTHREAD_MUTEX_INITIALIZER; |
| 473 | 472 | ||
| 474 | int proxylist_add_network( const ot_net *proxy, const ot_net *net ) { | 473 | int proxylist_add_network(const ot_net *proxy, const ot_net *net) { |
| 475 | ot_proxymap *map; | 474 | ot_proxymap *map; |
| 476 | int exactmatch, result = 1; | 475 | int exactmatch, result = 1; |
| 477 | pthread_mutex_lock(&g_proxies_list_mutex); | 476 | pthread_mutex_lock(&g_proxies_list_mutex); |
| 478 | 477 | ||
| 479 | /* If we have a direct hit, use and extend the vector there */ | 478 | /* If we have a direct hit, use and extend the vector there */ |
| 480 | map = binary_search( proxy, g_proxies_list.data, g_proxies_list.size, sizeof(ot_proxymap), sizeof(ot_net), &exactmatch ); | 479 | map = binary_search(proxy, g_proxies_list.data, g_proxies_list.size, sizeof(ot_proxymap), sizeof(ot_net), &exactmatch); |
| 481 | 480 | ||
| 482 | if( !map || !exactmatch ) { | 481 | if (!map || !exactmatch) { |
| 483 | /* else see, if we've got overlapping networks | 482 | /* else see, if we've got overlapping networks |
| 484 | and get a new empty vector if not */ | 483 | and get a new empty vector if not */ |
| 485 | ot_vector empty; | 484 | ot_vector empty; |
| 486 | memset( &empty, 0, sizeof( ot_vector ) ); | 485 | memset(&empty, 0, sizeof(ot_vector)); |
| 487 | map = set_value_for_net( proxy, &g_proxies_list, &empty, sizeof(ot_proxymap)); | 486 | map = set_value_for_net(proxy, &g_proxies_list, &empty, sizeof(ot_proxymap)); |
| 488 | } | 487 | } |
| 489 | 488 | ||
| 490 | if( map && set_value_for_net( net, &map->networks, NULL, sizeof(ot_net) ) ) | 489 | if (map && set_value_for_net(net, &map->networks, NULL, sizeof(ot_net))) |
| 491 | result = 1; | 490 | result = 1; |
| 492 | 491 | ||
| 493 | pthread_mutex_unlock(&g_proxies_list_mutex); | 492 | pthread_mutex_unlock(&g_proxies_list_mutex); |
| 494 | return result; | 493 | return result; |
| 495 | } | 494 | } |
| 496 | 495 | ||
| 497 | int proxylist_check_proxy( const ot_ip6 proxy, const ot_ip6 address ) { | 496 | int proxylist_check_proxy(const ot_ip6 proxy, const ot_ip6 address) { |
| 498 | int result = 0; | 497 | int result = 0; |
| 499 | ot_proxymap *map; | 498 | ot_proxymap *map; |
| 500 | 499 | ||
| 501 | pthread_mutex_lock(&g_proxies_list_mutex); | 500 | pthread_mutex_lock(&g_proxies_list_mutex); |
| 502 | 501 | ||
| 503 | if( ( map = get_value_for_net( proxy, &g_proxies_list, sizeof(ot_proxymap) ) ) ) | 502 | if ((map = get_value_for_net(proxy, &g_proxies_list, sizeof(ot_proxymap)))) |
| 504 | if( !address || get_value_for_net( address, &map->networks, sizeof(ot_net) ) ) | 503 | if (!address || get_value_for_net(address, &map->networks, sizeof(ot_net))) |
| 505 | result = 1; | 504 | result = 1; |
| 506 | 505 | ||
| 507 | pthread_mutex_unlock(&g_proxies_list_mutex); | 506 | pthread_mutex_unlock(&g_proxies_list_mutex); |
| @@ -514,46 +513,49 @@ static ot_net g_admin_nets[OT_ADMINIP_MAX]; | |||
| 514 | static ot_permissions g_admin_nets_permissions[OT_ADMINIP_MAX]; | 513 | static ot_permissions g_admin_nets_permissions[OT_ADMINIP_MAX]; |
| 515 | static unsigned int g_admin_nets_count = 0; | 514 | static unsigned int g_admin_nets_count = 0; |
| 516 | 515 | ||
| 517 | int accesslist_bless_net( ot_net *net, ot_permissions permissions ) { | 516 | int accesslist_bless_net(ot_net *net, ot_permissions permissions) { |
| 518 | if( g_admin_nets_count >= OT_ADMINIP_MAX ) | 517 | if (g_admin_nets_count >= OT_ADMINIP_MAX) |
| 519 | return -1; | 518 | return -1; |
| 520 | 519 | ||
| 521 | memcpy(g_admin_nets + g_admin_nets_count, net, sizeof(ot_net)); | 520 | memcpy(g_admin_nets + g_admin_nets_count, net, sizeof(ot_net)); |
| 522 | g_admin_nets_permissions[ g_admin_nets_count++ ] = permissions; | 521 | g_admin_nets_permissions[g_admin_nets_count++] = permissions; |
| 523 | 522 | ||
| 524 | #ifdef _DEBUG | 523 | #ifdef _DEBUG |
| 525 | { | 524 | { |
| 526 | char _debug[512]; | 525 | char _debug[512]; |
| 527 | int off = snprintf( _debug, sizeof(_debug), "Blessing ip net " ); | 526 | int off = snprintf(_debug, sizeof(_debug), "Blessing ip net "); |
| 528 | off += fmt_ip6c(_debug+off, net->address ); | 527 | off += fmt_ip6c(_debug + off, net->address); |
| 529 | if( net->bits < 128) { | 528 | if (net->bits < 128) { |
| 530 | _debug[off++] = '/'; | 529 | _debug[off++] = '/'; |
| 531 | if( ip6_isv4mapped(net->address) ) | 530 | if (ip6_isv4mapped(net->address)) |
| 532 | off += fmt_long(_debug+off, net->bits-96); | 531 | off += fmt_long(_debug + off, net->bits - 96); |
| 533 | else | 532 | else |
| 534 | off += fmt_long(_debug+off, net->bits); | 533 | off += fmt_long(_debug + off, net->bits); |
| 535 | } | 534 | } |
| 536 | 535 | ||
| 537 | if( permissions & OT_PERMISSION_MAY_STAT ) off += snprintf( _debug+off, 512-off, " may_fetch_stats" ); | 536 | if (permissions & OT_PERMISSION_MAY_STAT) |
| 538 | if( permissions & OT_PERMISSION_MAY_LIVESYNC ) off += snprintf( _debug+off, 512-off, " may_sync_live" ); | 537 | off += snprintf(_debug + off, 512 - off, " may_fetch_stats"); |
| 539 | if( permissions & OT_PERMISSION_MAY_FULLSCRAPE ) off += snprintf( _debug+off, 512-off, " may_fetch_fullscrapes" ); | 538 | if (permissions & OT_PERMISSION_MAY_LIVESYNC) |
| 540 | if( permissions & OT_PERMISSION_MAY_PROXY ) off += snprintf( _debug+off, 512-off, " may_proxy" ); | 539 | off += snprintf(_debug + off, 512 - off, " may_sync_live"); |
| 541 | if( !permissions ) off += snprintf( _debug+off, sizeof(_debug)-off, " nothing" ); | 540 | if (permissions & OT_PERMISSION_MAY_FULLSCRAPE) |
| 541 | off += snprintf(_debug + off, 512 - off, " may_fetch_fullscrapes"); | ||
| 542 | if (permissions & OT_PERMISSION_MAY_PROXY) | ||
| 543 | off += snprintf(_debug + off, 512 - off, " may_proxy"); | ||
| 544 | if (!permissions) | ||
| 545 | off += snprintf(_debug + off, sizeof(_debug) - off, " nothing"); | ||
| 542 | _debug[off++] = '.'; | 546 | _debug[off++] = '.'; |
| 543 | _debug[off++] = '\n'; | 547 | _debug[off++] = '\n'; |
| 544 | (void)write( 2, _debug, off ); | 548 | (void)write(2, _debug, off); |
| 545 | } | 549 | } |
| 546 | #endif | 550 | #endif |
| 547 | 551 | ||
| 548 | return 0; | 552 | return 0; |
| 549 | } | 553 | } |
| 550 | 554 | ||
| 551 | int accesslist_is_blessed( ot_ip6 ip, ot_permissions permissions ) { | 555 | int accesslist_is_blessed(ot_ip6 ip, ot_permissions permissions) { |
| 552 | unsigned int i; | 556 | unsigned int i; |
| 553 | for( i=0; i<g_admin_nets_count; ++i ) | 557 | for (i = 0; i < g_admin_nets_count; ++i) |
| 554 | if( address_in_net(ip, g_admin_nets + i) && (g_admin_nets_permissions[ i ] & permissions )) | 558 | if (address_in_net(ip, g_admin_nets + i) && (g_admin_nets_permissions[i] & permissions)) |
| 555 | return 1; | 559 | return 1; |
| 556 | return 0; | 560 | return 0; |
| 557 | } | 561 | } |
| 558 | |||
| 559 | const char *g_version_accesslist_c = "$Source$: $Revision$\n"; | ||
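
accesslist_bless_net() is meant to be called once per configured admin network at startup, while accesslist_is_blessed() is the cheap per-request check against that small fixed array. A rough usage sketch (illustration only; how the ot_net is parsed from a config line happens elsewhere and is not part of this diff):

#include <stdio.h>
#include "ot_accesslist.h"

static void example_grant_stats(ot_net *admin_net) {
  /* returns -1 once OT_ADMINIP_MAX nets have been registered */
  if (accesslist_bless_net(admin_net, OT_PERMISSION_MAY_STAT) == -1)
    fprintf(stderr, "too many admin nets, OT_ADMINIP_MAX reached\n");
}

static int example_may_fetch_stats(ot_ip6 remote_ip) {
  return accesslist_is_blessed(remote_ip, OT_PERMISSION_MAY_STAT);
}
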
diff --git a/ot_accesslist.h b/ot_accesslist.h index a988791..0a7488e 100644 --- a/ot_accesslist.h +++ b/ot_accesslist.h | |||
| @@ -6,16 +6,18 @@ | |||
| 6 | #ifndef OT_ACCESSLIST_H__ | 6 | #ifndef OT_ACCESSLIST_H__ |
| 7 | #define OT_ACCESSLIST_H__ | 7 | #define OT_ACCESSLIST_H__ |
| 8 | 8 | ||
| 9 | #if defined ( WANT_ACCESSLIST_BLACK ) && defined ( WANT_ACCESSLIST_WHITE ) | 9 | #include "trackerlogic.h" |
| 10 | # error WANT_ACCESSLIST_BLACK and WANT_ACCESSLIST_WHITE are exclusive. | 10 | |
| 11 | #if defined(WANT_ACCESSLIST_BLACK) && defined(WANT_ACCESSLIST_WHITE) | ||
| 12 | #error WANT_ACCESSLIST_BLACK and WANT_ACCESSLIST_WHITE are exclusive. | ||
| 11 | #endif | 13 | #endif |
| 12 | 14 | ||
| 13 | #if defined ( WANT_ACCESSLIST_BLACK ) || defined (WANT_ACCESSLIST_WHITE ) | 15 | #if defined(WANT_ACCESSLIST_BLACK) || defined(WANT_ACCESSLIST_WHITE) |
| 14 | #define WANT_ACCESSLIST | 16 | #define WANT_ACCESSLIST |
| 15 | void accesslist_init( void ); | 17 | void accesslist_init(void); |
| 16 | void accesslist_deinit( void ); | 18 | void accesslist_deinit(void); |
| 17 | int accesslist_hashisvalid( ot_hash hash ); | 19 | int accesslist_hashisvalid(ot_hash hash); |
| 18 | void accesslist_cleanup( void ); | 20 | void accesslist_cleanup(void); |
| 19 | 21 | ||
| 20 | extern char *g_accesslist_filename; | 22 | extern char *g_accesslist_filename; |
| 21 | #ifdef WANT_DYNAMIC_ACCESSLIST | 23 | #ifdef WANT_DYNAMIC_ACCESSLIST |
| @@ -25,16 +27,16 @@ extern char *g_accesslist_pipe_delete; | |||
| 25 | 27 | ||
| 26 | #else | 28 | #else |
| 27 | #ifdef WANT_DYNAMIC_ACCESSLIST | 29 | #ifdef WANT_DYNAMIC_ACCESSLIST |
| 28 | # error WANT_DYNAMIC_ACCESSLIST needs either WANT_ACCESSLIST_BLACK or WANT_ACCESSLIST_WHITE | 30 | #error WANT_DYNAMIC_ACCESSLIST needs either WANT_ACCESSLIST_BLACK or WANT_ACCESSLIST_WHITE |
| 29 | #endif | 31 | #endif |
| 30 | 32 | ||
| 31 | #define accesslist_init( accesslist_filename ) | 33 | #define accesslist_init(accesslist_filename) |
| 32 | #define accesslist_deinit( ) | 34 | #define accesslist_deinit() |
| 33 | #define accesslist_hashisvalid( hash ) 1 | 35 | #define accesslist_hashisvalid(hash) 1 |
| 34 | #endif | 36 | #endif |
| 35 | 37 | ||
| 36 | /* Test if an address is subset of an ot_net, return value is considered a bool */ | 38 | /* Test if an address is subset of an ot_net, return value is considered a bool */ |
| 37 | int address_in_net( const ot_ip6 address, const ot_net *net ); | 39 | int address_in_net(const ot_ip6 address, const ot_net *net); |
| 38 | 40 | ||
| 39 | /* Store a value into a vector of struct { ot_net net, uint8_t[x] value } member; | 41 | /* Store a value into a vector of struct { ot_net net, uint8_t[x] value } member; |
| 40 | returns NULL | 42 | returns NULL |
| @@ -45,18 +47,17 @@ int address_in_net( const ot_ip6 address, const ot_net *net ); | |||
| 45 | returns pointer to new member in vector for success | 47 | returns pointer to new member in vector for success |
| 46 | member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping | 48 | member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping |
| 47 | */ | 49 | */ |
| 48 | void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value, const size_t member_size ); | 50 | void *set_value_for_net(const ot_net *net, ot_vector *vector, const void *value, const size_t member_size); |
| 49 | 51 | ||
| 50 | /* Takes a vector filled with struct { ot_net net, uint8_t[x] value } member; | 52 | /* Takes a vector filled with struct { ot_net net, uint8_t[x] value } member; |
| 51 | Returns pointer to _member_ associated with the net, or NULL if not found | 53 | Returns pointer to _member_ associated with the net, or NULL if not found |
| 52 | member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping | 54 | member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping |
| 53 | */ | 55 | */ |
| 54 | void *get_value_for_net( const ot_ip6 address, const ot_vector *vector, const size_t member_size ); | 56 | void *get_value_for_net(const ot_ip6 address, const ot_vector *vector, const size_t member_size); |
| 55 | |||
| 56 | 57 | ||
| 57 | #ifdef WANT_IP_FROM_PROXY | 58 | #ifdef WANT_IP_FROM_PROXY |
| 58 | int proxylist_add_network( const ot_net *proxy, const ot_net *net ); | 59 | int proxylist_add_network(const ot_net *proxy, const ot_net *net); |
| 59 | int proxylist_check_network( const ot_ip6 *proxy, const ot_ip6 address /* can be NULL to only check proxy */ ); | 60 | int proxylist_check_network(const ot_ip6 *proxy, const ot_ip6 address /* can be NULL to only check proxy */); |
| 60 | #endif | 61 | #endif |
| 61 | 62 | ||
| 62 | #ifdef WANT_FULLLOG_NETWORKS | 63 | #ifdef WANT_FULLLOG_NETWORKS |
| @@ -70,10 +71,10 @@ struct ot_log { | |||
| 70 | }; | 71 | }; |
| 71 | extern ot_log *g_logchain_first, *g_logchain_last; | 72 | extern ot_log *g_logchain_first, *g_logchain_last; |
| 72 | 73 | ||
| 73 | void loglist_add_network( const ot_net *net ); | 74 | void loglist_add_network(const ot_net *net); |
| 74 | void loglist_reset( ); | 75 | void loglist_reset(); |
| 75 | int loglist_check_address( const ot_ip6 address ); | 76 | int loglist_check_address(const ot_ip6 address); |
| 76 | #endif | 77 | #endif |
| 77 | 78 | ||
| 78 | typedef enum { | 79 | typedef enum { |
| 79 | OT_PERMISSION_MAY_FULLSCRAPE = 0x1, | 80 | OT_PERMISSION_MAY_FULLSCRAPE = 0x1, |
| @@ -82,7 +83,7 @@ typedef enum { | |||
| 82 | OT_PERMISSION_MAY_PROXY = 0x8 | 83 | OT_PERMISSION_MAY_PROXY = 0x8 |
| 83 | } ot_permissions; | 84 | } ot_permissions; |
| 84 | 85 | ||
| 85 | int accesslist_bless_net( ot_net *net, ot_permissions permissions ); | 86 | int accesslist_bless_net(ot_net *net, ot_permissions permissions); |
| 86 | int accesslist_is_blessed( ot_ip6 ip, ot_permissions permissions ); | 87 | int accesslist_is_blessed(ot_ip6 ip, ot_permissions permissions); |
| 87 | 88 | ||
| 88 | #endif | 89 | #endif |
diff --git a/ot_clean.c b/ot_clean.c --- a/ot_clean.c +++ b/ot_clean.c | |||
| @@ -5,90 +5,91 @@ | |||
| 5 | 5 | ||
| 6 | /* System */ | 6 | /* System */ |
| 7 | #include <pthread.h> | 7 | #include <pthread.h> |
| 8 | #include <unistd.h> | ||
| 9 | #include <string.h> | 8 | #include <string.h> |
| 9 | #include <unistd.h> | ||
| 10 | 10 | ||
| 11 | /* Libowfat */ | 11 | /* Libowfat */ |
| 12 | #include "io.h" | 12 | #include "io.h" |
| 13 | 13 | ||
| 14 | /* Opentracker */ | 14 | /* Opentracker */ |
| 15 | #include "trackerlogic.h" | 15 | #include "ot_accesslist.h" |
| 16 | #include "ot_mutex.h" | ||
| 17 | #include "ot_vector.h" | ||
| 18 | #include "ot_clean.h" | 16 | #include "ot_clean.h" |
| 17 | #include "ot_mutex.h" | ||
| 19 | #include "ot_stats.h" | 18 | #include "ot_stats.h" |
| 20 | #include "ot_accesslist.h" | 19 | #include "ot_vector.h" |
| 20 | #include "trackerlogic.h" | ||
| 21 | 21 | ||
| 22 | /* Returns amount of removed peers */ | 22 | /* Returns amount of removed peers */ |
| 23 | static ssize_t clean_single_bucket( ot_peer *peers, size_t peer_count, time_t timedout, int *removed_seeders ) { | 23 | static ssize_t clean_single_bucket(ot_peer *peers, size_t peer_count, size_t peer_size, time_t timedout, int *removed_seeders) { |
| 24 | ot_peer *last_peer = peers + peer_count, *insert_point; | 24 | ot_peer *last_peer = peers + peer_count * peer_size, *insert_point; |
| 25 | time_t timediff; | ||
| 26 | 25 | ||
| 27 | /* Two scan modes: unless there is one peer removed, just increase ot_peertime */ | 26 | /* Two scan modes: unless there is one peer removed, just increase ot_peertime */ |
| 28 | while( peers < last_peer ) { | 27 | while (peers < last_peer) { |
| 29 | if( ( timediff = timedout + OT_PEERTIME( peers ) ) >= OT_PEER_TIMEOUT ) | 28 | time_t timediff = timedout + OT_PEERTIME(peers, peer_size); |
| 29 | if (timediff >= OT_PEER_TIMEOUT) | ||
| 30 | break; | 30 | break; |
| 31 | OT_PEERTIME( peers++ ) = timediff; | 31 | OT_PEERTIME(peers, peer_size) = timediff; |
| 32 | peers += peer_size; | ||
| 33 | } | ||
| 34 | |||
| 35 | /* If we at least remove one peer, we have to copy */ | ||
| 36 | for (insert_point = peers; peers < last_peer; peers += peer_size) { | ||
| 37 | time_t timediff = timedout + OT_PEERTIME(peers, peer_size); | ||
| 38 | |||
| 39 | if (timediff < OT_PEER_TIMEOUT) { | ||
| 40 | OT_PEERTIME(peers, peer_size) = timediff; | ||
| 41 | memcpy(insert_point, peers, peer_size); | ||
| 42 | insert_point += peer_size; | ||
| 43 | } else if (OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING) | ||
| 44 | (*removed_seeders)++; | ||
| 32 | } | 45 | } |
| 33 | 46 | ||
| 34 | /* If we at least remove one peer, we have to copy */ | 47 | return (peers - insert_point) / peer_size; |
| 35 | insert_point = peers; | ||
| 36 | while( peers < last_peer ) | ||
| 37 | if( ( timediff = timedout + OT_PEERTIME( peers ) ) < OT_PEER_TIMEOUT ) { | ||
| 38 | OT_PEERTIME( peers ) = timediff; | ||
| 39 | memcpy( insert_point++, peers++, sizeof(ot_peer)); | ||
| 40 | } else | ||
| 41 | if( OT_PEERFLAG( peers++ ) & PEER_FLAG_SEEDING ) | ||
| 42 | (*removed_seeders)++; | ||
| 43 | |||
| 44 | return peers - insert_point; | ||
| 45 | } | 48 | } |
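
clean_single_bucket() now treats peers as opaque peer_size-byte records, so one loop serves both the smaller IPv4 and the larger IPv6 peer entries. Stripped of the timestamp refresh and the fast path that avoids copying until the first expired peer, the second loop is the usual in-place filter; a generic sketch of that step (illustration only, not part of the change):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Keep every record the predicate accepts, packing survivors to the front;
   returns the number of records dropped. */
static size_t example_compact(uint8_t *base, size_t count, size_t rec_size,
                              int (*keep)(const uint8_t *rec)) {
  uint8_t *end = base + count * rec_size, *insert = base, *cur;
  for (cur = base; cur < end; cur += rec_size)
    if (keep(cur)) {
      if (insert != cur)
        memcpy(insert, cur, rec_size);
      insert += rec_size;
    }
  return (size_t)(end - insert) / rec_size;
}
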
| 46 | 49 | ||
| 47 | /* Clean a single torrent | 50 | int clean_single_peer_list(ot_peerlist *peer_list, size_t peer_size) { |
| 48 | return 1 if torrent timed out | 51 | ot_vector *peer_vector = &peer_list->peers; |
| 49 | */ | 52 | time_t timedout = (time_t)(g_now_minutes - peer_list->base); |
| 50 | int clean_single_torrent( ot_torrent *torrent ) { | 53 | int num_buckets = 1, removed_seeders = 0; |
| 51 | ot_peerlist *peer_list = torrent->peer_list; | ||
| 52 | ot_vector *bucket_list = &peer_list->peers; | ||
| 53 | time_t timedout = (time_t)( g_now_minutes - peer_list->base ); | ||
| 54 | int num_buckets = 1, removed_seeders = 0; | ||
| 55 | 54 | ||
| 56 | /* No need to clean empty torrent */ | 55 | /* No need to clean empty torrent */ |
| 57 | if( !timedout ) | 56 | if (!timedout) |
| 58 | return 0; | 57 | return 0; |
| 59 | 58 | ||
| 60 | /* Torrent has idled out */ | 59 | /* Torrent has idled out */ |
| 61 | if( timedout > OT_TORRENT_TIMEOUT ) | 60 | if (timedout > OT_TORRENT_TIMEOUT) |
| 62 | return 1; | 61 | return 1; |
| 63 | 62 | ||
| 64 | /* Nothing to be cleaned here? Test if torrent is worth keeping */ | 63 | /* Nothing to be cleaned here? Test if torrent is worth keeping */ |
| 65 | if( timedout > OT_PEER_TIMEOUT ) { | 64 | if (timedout > OT_PEER_TIMEOUT) { |
| 66 | if( !peer_list->peer_count ) | 65 | if (!peer_list->peer_count) |
| 67 | return peer_list->down_count ? 0 : 1; | 66 | return peer_list->down_count ? 0 : 1; |
| 68 | timedout = OT_PEER_TIMEOUT; | 67 | timedout = OT_PEER_TIMEOUT; |
| 69 | } | 68 | } |
| 70 | 69 | ||
| 71 | if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { | 70 | if (OT_PEERLIST_HASBUCKETS(peer_list)) { |
| 72 | num_buckets = bucket_list->size; | 71 | num_buckets = peer_vector->size; |
| 73 | bucket_list = (ot_vector *)bucket_list->data; | 72 | peer_vector = (ot_vector *)peer_vector->data; |
| 74 | } | 73 | } |
| 75 | 74 | ||
| 76 | while( num_buckets-- ) { | 75 | while (num_buckets--) { |
| 77 | size_t removed_peers = clean_single_bucket( bucket_list->data, bucket_list->size, timedout, &removed_seeders ); | 76 | size_t removed_peers = clean_single_bucket(peer_vector->data, peer_vector->size, peer_size, timedout, &removed_seeders); |
| 78 | peer_list->peer_count -= removed_peers; | 77 | peer_list->peer_count -= removed_peers; |
| 79 | bucket_list->size -= removed_peers; | 78 | peer_vector->size -= removed_peers; |
| 80 | if( bucket_list->size < removed_peers ) | 79 | if (removed_peers) |
| 81 | vector_fixup_peers( bucket_list ); | 80 | vector_fixup_peers(peer_vector, peer_size); |
| 82 | ++bucket_list; | 81 | |
| 82 | /* Skip to next bucket, a vector containing peers */ | ||
| 83 | ++peer_vector; | ||
| 83 | } | 84 | } |
| 84 | 85 | ||
| 85 | peer_list->seed_count -= removed_seeders; | 86 | peer_list->seed_count -= removed_seeders; |
| 86 | 87 | ||
| 87 | /* See, if we need to convert a torrent from simple vector to bucket list */ | 88 | /* See if we need to convert a torrent from simple vector to bucket list */ |
| 88 | if( ( peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT ) || OT_PEERLIST_HASBUCKETS(peer_list) ) | 89 | if ((peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT) || OT_PEERLIST_HASBUCKETS(peer_list)) |
| 89 | vector_redistribute_buckets( peer_list ); | 90 | vector_redistribute_buckets(peer_list, peer_size); |
| 90 | 91 | ||
| 91 | if( peer_list->peer_count ) | 92 | if (peer_list->peer_count) |
| 92 | peer_list->base = g_now_minutes; | 93 | peer_list->base = g_now_minutes; |
| 93 | else { | 94 | else { |
| 94 | /* When we got here, the last time that torrent | 95 | /* When we got here, the last time that torrent |
| @@ -96,32 +97,38 @@ int clean_single_torrent( ot_torrent *torrent ) { | |||
| 96 | peer_list->base = g_now_minutes - OT_PEER_TIMEOUT; | 97 | peer_list->base = g_now_minutes - OT_PEER_TIMEOUT; |
| 97 | } | 98 | } |
| 98 | return 0; | 99 | return 0; |
| 100 | } | ||
| 99 | 101 | ||
| 102 | /* Clean a single torrent | ||
| 103 | return 1 if torrent timed out | ||
| 104 | */ | ||
| 105 | int clean_single_torrent(ot_torrent *torrent) { | ||
| 106 | return clean_single_peer_list(torrent->peer_list6, OT_PEER_SIZE6) * clean_single_peer_list(torrent->peer_list4, OT_PEER_SIZE4); | ||
| 100 | } | 107 | } |
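
Since clean_single_peer_list() only ever returns 0 or 1, the multiplication above is a compact logical AND: the torrent is reported as timed out only when both the IPv6 and the IPv4 peer list have timed out. Written out explicitly, the return is equivalent to:

  int timed_out6 = clean_single_peer_list(torrent->peer_list6, OT_PEER_SIZE6);
  int timed_out4 = clean_single_peer_list(torrent->peer_list4, OT_PEER_SIZE4);
  return timed_out6 && timed_out4;   /* same value as the product above */
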
| 101 | 108 | ||
| 102 | /* Clean up all peers in current bucket, remove timedout pools and | 109 | /* Clean up all peers in current bucket, remove timedout pools and |
| 103 | torrents */ | 110 | torrents */ |
| 104 | static void * clean_worker( void * args ) { | 111 | static void *clean_worker(void *args) { |
| 105 | (void) args; | 112 | (void)args; |
| 106 | while( 1 ) { | 113 | while (1) { |
| 107 | int bucket = OT_BUCKET_COUNT; | 114 | int bucket = OT_BUCKET_COUNT; |
| 108 | while( bucket-- ) { | 115 | while (bucket--) { |
| 109 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 116 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
| 110 | size_t toffs; | 117 | size_t toffs; |
| 111 | int delta_torrentcount = 0; | 118 | int delta_torrentcount = 0; |
| 112 | 119 | ||
| 113 | for( toffs=0; toffs<torrents_list->size; ++toffs ) { | 120 | for (toffs = 0; toffs < torrents_list->size; ++toffs) { |
| 114 | ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + toffs; | 121 | ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + toffs; |
| 115 | if( clean_single_torrent( torrent ) ) { | 122 | if (clean_single_torrent(torrent)) { |
| 116 | vector_remove_torrent( torrents_list, torrent ); | 123 | vector_remove_torrent(torrents_list, torrent); |
| 117 | --delta_torrentcount; | 124 | --delta_torrentcount; |
| 118 | --toffs; | 125 | --toffs; |
| 119 | } | 126 | } |
| 120 | } | 127 | } |
| 121 | mutex_bucket_unlock( bucket, delta_torrentcount ); | 128 | mutex_bucket_unlock(bucket, delta_torrentcount); |
| 122 | if( !g_opentracker_running ) | 129 | if (!g_opentracker_running) |
| 123 | return NULL; | 130 | return NULL; |
| 124 | usleep( OT_CLEAN_SLEEP ); | 131 | usleep(OT_CLEAN_SLEEP); |
| 125 | } | 132 | } |
| 126 | stats_cleanup(); | 133 | stats_cleanup(); |
| 127 | #ifdef WANT_ACCESSLIST | 134 | #ifdef WANT_ACCESSLIST |
| @@ -132,12 +139,6 @@ static void * clean_worker( void * args ) { | |||
| 132 | } | 139 | } |
| 133 | 140 | ||
| 134 | static pthread_t thread_id; | 141 | static pthread_t thread_id; |
| 135 | void clean_init( void ) { | 142 | void clean_init(void) { pthread_create(&thread_id, NULL, clean_worker, NULL); } |
| 136 | pthread_create( &thread_id, NULL, clean_worker, NULL ); | ||
| 137 | } | ||
| 138 | |||
| 139 | void clean_deinit( void ) { | ||
| 140 | pthread_cancel( thread_id ); | ||
| 141 | } | ||
| 142 | 143 | ||
| 143 | const char *g_version_clean_c = "$Source$: $Revision$\n"; | 144 | void clean_deinit(void) { pthread_cancel(thread_id); } |
diff --git a/ot_clean.h b/ot_clean.h --- a/ot_clean.h +++ b/ot_clean.h | |||
| @@ -7,13 +7,13 @@ | |||
| 7 | #define OT_CLEAN_H__ | 7 | #define OT_CLEAN_H__ |
| 8 | 8 | ||
| 9 | /* The amount of time a clean cycle should take */ | 9 | /* The amount of time a clean cycle should take */ |
| 10 | #define OT_CLEAN_INTERVAL_MINUTES 2 | 10 | #define OT_CLEAN_INTERVAL_MINUTES 2 |
| 11 | 11 | ||
| 12 | /* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */ | 12 | /* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */ |
| 13 | #define OT_CLEAN_SLEEP ( ( ( OT_CLEAN_INTERVAL_MINUTES ) * 60 * 1000000 ) / ( OT_BUCKET_COUNT ) ) | 13 | #define OT_CLEAN_SLEEP (((OT_CLEAN_INTERVAL_MINUTES) * 60 * 1000000) / (OT_BUCKET_COUNT)) |
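
As a worked example, assuming the usual OT_BUCKET_COUNT of 1024 (defined in trackerlogic.h, which is not part of this excerpt), OT_CLEAN_SLEEP evaluates to 2 * 60 * 1000000 / 1024 ≈ 117187 microseconds, so the clean worker sleeps roughly 117 ms after each bucket and one sweep over all buckets takes about the configured two minutes.
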
| 14 | 14 | ||
| 15 | void clean_init( void ); | 15 | void clean_init(void); |
| 16 | void clean_deinit( void ); | 16 | void clean_deinit(void); |
| 17 | int clean_single_torrent( ot_torrent *torrent ); | 17 | int clean_single_torrent(ot_torrent *torrent); |
| 18 | 18 | ||
| 19 | #endif | 19 | #endif |
diff --git a/ot_fullscrape.c b/ot_fullscrape.c index 5d115dc..6fd6d1c 100644 --- a/ot_fullscrape.c +++ b/ot_fullscrape.c | |||
| @@ -6,14 +6,18 @@ | |||
| 6 | #ifdef WANT_FULLSCRAPE | 6 | #ifdef WANT_FULLSCRAPE |
| 7 | 7 | ||
| 8 | /* System */ | 8 | /* System */ |
| 9 | #include <sys/param.h> | 9 | #include <arpa/inet.h> |
| 10 | #include <pthread.h> | ||
| 10 | #include <stdio.h> | 11 | #include <stdio.h> |
| 11 | #include <string.h> | 12 | #include <string.h> |
| 12 | #include <pthread.h> | 13 | #include <sys/param.h> |
| 13 | #include <arpa/inet.h> | ||
| 14 | #ifdef WANT_COMPRESSION_GZIP | 14 | #ifdef WANT_COMPRESSION_GZIP |
| 15 | #include <zlib.h> | 15 | #include <zlib.h> |
| 16 | #endif | 16 | #endif |
| 17 | #ifdef WANT_COMPRESSION_ZSTD | ||
| 18 | #include <zstd.h> | ||
| 19 | #endif | ||
| 20 | |||
| 17 | 21 | ||
| 18 | /* Libowfat */ | 22 | /* Libowfat */ |
| 19 | #include "byte.h" | 23 | #include "byte.h" |
| @@ -21,50 +25,64 @@ | |||
| 21 | #include "textcode.h" | 25 | #include "textcode.h" |
| 22 | 26 | ||
| 23 | /* Opentracker */ | 27 | /* Opentracker */ |
| 24 | #include "trackerlogic.h" | ||
| 25 | #include "ot_mutex.h" | ||
| 26 | #include "ot_iovec.h" | ||
| 27 | #include "ot_fullscrape.h" | 28 | #include "ot_fullscrape.h" |
| 29 | #include "ot_iovec.h" | ||
| 30 | #include "ot_mutex.h" | ||
| 31 | #include "trackerlogic.h" | ||
| 28 | 32 | ||
| 29 | /* Fetch full scrape info for all torrents | 33 | /* Fetch full scrape info for all torrents |
| 30 | Full scrapes usually are huge and one does not want to | 34 | Full scrapes usually are huge and one does not want to |
| 31 | allocate more memory. So lets get them in 512k units | 35 | allocate more memory. So lets get them in 512k units |
| 32 | */ | 36 | */ |
| 33 | #define OT_SCRAPE_CHUNK_SIZE (1024*1024) | 37 | #define OT_SCRAPE_CHUNK_SIZE (1024 * 1024) |
| 34 | 38 | ||
| 35 | /* "d8:completei%zde10:downloadedi%zde10:incompletei%zdee" */ | 39 | /* "d8:completei%zde10:downloadedi%zde10:incompletei%zdee" */ |
| 36 | #define OT_SCRAPE_MAXENTRYLEN 256 | 40 | #define OT_SCRAPE_MAXENTRYLEN 256 |
| 37 | 41 | ||
| 38 | /* Forward declaration */ | 42 | /* Forward declaration */ |
| 39 | static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ); | 43 | static void fullscrape_make(int taskid, ot_tasktype mode); |
| 40 | #ifdef WANT_COMPRESSION_GZIP | 44 | #ifdef WANT_COMPRESSION_GZIP |
| 41 | static void fullscrape_make_gzip( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ); | 45 | static void fullscrape_make_gzip(int taskid, ot_tasktype mode); |
| 46 | #endif | ||
| 47 | #ifdef WANT_COMPRESSION_ZSTD | ||
| 48 | static void fullscrape_make_zstd(int taskid, ot_tasktype mode); | ||
| 42 | #endif | 49 | #endif |
| 43 | 50 | ||
| 44 | /* Converter function from memory to human readable hex strings | 51 | /* Converter function from memory to human readable hex strings |
| 45 | XXX - Duplicated from ot_stats. Needs fix. */ | 52 | XXX - Duplicated from ot_stats. Needs fix. */ |
| 46 | static char*to_hex(char*d,uint8_t*s){char*m="0123456789ABCDEF";char *t=d;char*e=d+40;while(d<e){*d++=m[*s>>4];*d++=m[*s++&15];}*d=0;return t;} | 53 | static char *to_hex(char *d, uint8_t *s) { |
| 54 | char *m = "0123456789ABCDEF"; | ||
| 55 | char *t = d; | ||
| 56 | char *e = d + 40; | ||
| 57 | while (d < e) { | ||
| 58 | *d++ = m[*s >> 4]; | ||
| 59 | *d++ = m[*s++ & 15]; | ||
| 60 | } | ||
| 61 | *d = 0; | ||
| 62 | return t; | ||
| 63 | } | ||
| 47 | 64 | ||
| 48 | /* This is the entry point into this worker thread | 65 | /* This is the entry point into this worker thread |
| 49 | It grabs tasks from mutex_tasklist and delivers results back | 66 | It grabs tasks from mutex_tasklist and delivers results back |
| 50 | */ | 67 | */ |
| 51 | static void * fullscrape_worker( void * args ) { | 68 | static void *fullscrape_worker(void *args) { |
| 52 | int iovec_entries; | 69 | (void)args; |
| 53 | struct iovec *iovector; | ||
| 54 | 70 | ||
| 55 | (void) args; | 71 | while (g_opentracker_running) { |
| 56 | |||
| 57 | while( g_opentracker_running ) { | ||
| 58 | ot_tasktype tasktype = TASK_FULLSCRAPE; | 72 | ot_tasktype tasktype = TASK_FULLSCRAPE; |
| 59 | ot_taskid taskid = mutex_workqueue_poptask( &tasktype ); | 73 | ot_taskid taskid = mutex_workqueue_poptask(&tasktype); |
| 74 | #ifdef WANT_COMPRESSION_ZSTD | ||
| 75 | if (tasktype & TASK_FLAG_ZSTD) | ||
| 76 | fullscrape_make_zstd(taskid, tasktype); | ||
| 77 | else | ||
| 78 | #endif | ||
| 60 | #ifdef WANT_COMPRESSION_GZIP | 79 | #ifdef WANT_COMPRESSION_GZIP |
| 61 | if (tasktype & TASK_FLAG_GZIP) | 80 | if (tasktype & TASK_FLAG_GZIP) |
| 62 | fullscrape_make_gzip( &iovec_entries, &iovector, tasktype ); | 81 | fullscrape_make_gzip(taskid, tasktype); |
| 63 | else | 82 | else |
| 64 | #endif | 83 | #endif |
| 65 | fullscrape_make( &iovec_entries, &iovector, tasktype ); | 84 | fullscrape_make(taskid, tasktype); |
| 66 | if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) ) | 85 | mutex_workqueue_pushchunked(taskid, NULL); |
| 67 | iovec_free( &iovec_entries, &iovector ); | ||
| 68 | } | 86 | } |
| 69 | return NULL; | 87 | return NULL; |
| 70 | } | 88 | } |
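
The worker no longer assembles one large iovec array per task: each *_make() variant now streams fixed-size buffers through mutex_workqueue_pushchunked(), and the pushchunked(taskid, NULL) call above marks the end of the stream once the maker returns. Reduced to its skeleton, the producer side of that protocol looks roughly like the sketch below (illustration only; the example_* helpers are hypothetical stand-ins for the real entry writers, and in the real code the final NULL push is done by fullscrape_worker rather than the maker):

#include <stdlib.h>
#include <sys/uio.h>
#include "ot_mutex.h"   /* mutex_workqueue_pushchunked */

extern int   example_have_more_entries(void);   /* hypothetical */
extern char *example_append_entry(char *w);     /* hypothetical */

static void example_chunk_producer(int taskid) {
  struct iovec chunk;
  char *w = chunk.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);  /* defines above */
  if (!w)
    return;

  while (example_have_more_entries()) {
    w = example_append_entry(w);
    /* chunk nearly full? hand it over and start a fresh one */
    if (w > (char *)chunk.iov_base + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN) {
      chunk.iov_len = w - (char *)chunk.iov_base;
      if (mutex_workqueue_pushchunked(taskid, &chunk)) {
        free(chunk.iov_base);                    /* consumer went away */
        return;
      }
      w = chunk.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
      if (!w)
        return;
    }
  }

  /* push the remainder, then a NULL chunk as end-of-stream marker */
  chunk.iov_len = w - (char *)chunk.iov_base;
  if (mutex_workqueue_pushchunked(taskid, &chunk))
    free(chunk.iov_base);
  mutex_workqueue_pushchunked(taskid, NULL);
}
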
| @@ -82,76 +100,92 @@ void fullscrape_deliver( int64 sock, ot_tasktype tasktype ) { | |||
| 82 | mutex_workqueue_pushtask( sock, tasktype ); | 100 | mutex_workqueue_pushtask( sock, tasktype ); |
| 83 | } | 101 | } |
| 84 | 102 | ||
| 85 | static char * fullscrape_write_one( ot_tasktype mode, char *r, ot_peerlist *peer_list, ot_hash *hash ) { | 103 | static char * fullscrape_write_one( ot_tasktype mode, char *r, ot_torrent *torrent, ot_hash *hash ) { |
| 86 | switch( mode & TASK_TASK_MASK ) { | 104 | size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; |
| 87 | case TASK_FULLSCRAPE: | 105 | size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; |
| 88 | default: | 106 | size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count; |
| 89 | /* push hash as bencoded string */ | 107 | |
| 90 | *r++='2'; *r++='0'; *r++=':'; | 108 | switch (mode & TASK_TASK_MASK) { |
| 91 | memcpy( r, hash, sizeof(ot_hash) ); r += sizeof(ot_hash); | 109 | case TASK_FULLSCRAPE: |
| 92 | /* push rest of the scrape string */ | 110 | default: |
| 93 | r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", peer_list->seed_count, peer_list->down_count, peer_list->peer_count-peer_list->seed_count ); | 111 | /* push hash as bencoded string */ |
| 94 | 112 | *r++ = '2'; | |
| 95 | break; | 113 | *r++ = '0'; |
| 96 | case TASK_FULLSCRAPE_TPB_ASCII: | 114 | *r++ = ':'; |
| 97 | to_hex( r, *hash ); r+= 2 * sizeof(ot_hash); | 115 | memcpy(r, hash, sizeof(ot_hash)); |
| 98 | r += sprintf( r, ":%zd:%zd\n", peer_list->seed_count, peer_list->peer_count-peer_list->seed_count ); | 116 | r += sizeof(ot_hash); |
| 99 | break; | 117 | /* push rest of the scrape string */ |
| 100 | case TASK_FULLSCRAPE_TPB_ASCII_PLUS: | 118 | r += sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", seed_count, down_count, peer_count - seed_count); |
| 101 | to_hex( r, *hash ); r+= 2 * sizeof(ot_hash); | 119 | |
| 102 | r += sprintf( r, ":%zd:%zd:%zd\n", peer_list->seed_count, peer_list->peer_count-peer_list->seed_count, peer_list->down_count ); | 120 | break; |
| 103 | break; | 121 | case TASK_FULLSCRAPE_TPB_ASCII: |
| 104 | case TASK_FULLSCRAPE_TPB_BINARY: | 122 | to_hex(r, *hash); |
| 105 | memcpy( r, *hash, sizeof(ot_hash) ); r += sizeof(ot_hash); | 123 | r += 2 * sizeof(ot_hash); |
| 106 | *(uint32_t*)(r+0) = htonl( (uint32_t) peer_list->seed_count ); | 124 | r += sprintf(r, ":%zd:%zd\n", seed_count, peer_count - seed_count); |
| 107 | *(uint32_t*)(r+4) = htonl( (uint32_t)( peer_list->peer_count-peer_list->seed_count) ); | 125 | break; |
| 108 | r+=8; | 126 | case TASK_FULLSCRAPE_TPB_ASCII_PLUS: |
| 109 | break; | 127 | to_hex(r, *hash); |
| 110 | case TASK_FULLSCRAPE_TPB_URLENCODED: | 128 | r += 2 * sizeof(ot_hash); |
| 111 | r += fmt_urlencoded( r, (char *)*hash, 20 ); | 129 | r += sprintf(r, ":%zd:%zd:%zd\n", seed_count, peer_count - seed_count, down_count); |
| 112 | r += sprintf( r, ":%zd:%zd\n", peer_list->seed_count, peer_list->peer_count-peer_list->seed_count ); | 130 | break; |
| 113 | break; | 131 | case TASK_FULLSCRAPE_TPB_BINARY: |
| 114 | case TASK_FULLSCRAPE_TRACKERSTATE: | 132 | memcpy(r, *hash, sizeof(ot_hash)); |
| 115 | to_hex( r, *hash ); r+= 2 * sizeof(ot_hash); | 133 | r += sizeof(ot_hash); |
| 116 | r += sprintf( r, ":%zd:%zd\n", peer_list->base, peer_list->down_count ); | 134 | *(uint32_t *)(r + 0) = htonl((uint32_t)seed_count); |
| 117 | break; | 135 | *(uint32_t *)(r + 4) = htonl((uint32_t)(peer_count - seed_count)); |
| 118 | } | 136 | r += 8; |
| 119 | return r; | 137 | break; |
| 138 | case TASK_FULLSCRAPE_TPB_URLENCODED: | ||
| 139 | r += fmt_urlencoded(r, (char *)*hash, 20); | ||
| 140 | r += sprintf(r, ":%zd:%zd\n", seed_count, peer_count - seed_count); | ||
| 141 | break; | ||
| 142 | case TASK_FULLSCRAPE_TRACKERSTATE: | ||
| 143 | to_hex(r, *hash); | ||
| 144 | r += 2 * sizeof(ot_hash); | ||
| 145 | r += sprintf(r, ":%zd:%zd\n", torrent->peer_list6->base, down_count); | ||
| 146 | break; | ||
| 147 | } | ||
| 148 | return r; | ||
| 120 | } | 149 | } |
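
In the default TASK_FULLSCRAPE mode each torrent contributes one bencoded entry keyed by its raw 20-byte info_hash. With, for illustration, 5 seeders, 17 peers in total and 42 recorded downloads, the bytes produced by the branch above would be

  20:<20 raw info_hash bytes>d8:completei5e10:downloadedi42e10:incompletei12ee

and fullscrape_make() below wraps all of these entries in d5:filesd ... ee, so the complete reply is a single bencoded dictionary.
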
| 121 | 150 | ||
| 122 | static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ) { | 151 | static void fullscrape_make(int taskid, ot_tasktype mode) { |
| 123 | int bucket; | 152 | int bucket; |
| 124 | char *r, *re; | 153 | char *r, *re; |
| 154 | struct iovec iovector = {NULL, 0}; | ||
| 125 | 155 | ||
| 126 | /* Setup return vector... */ | 156 | /* Setup return vector... */ |
| 127 | *iovec_entries = 0; | 157 | r = iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); |
| 128 | *iovector = NULL; | 158 | if (!r) |
| 129 | r = iovec_increase( iovec_entries, iovector, OT_SCRAPE_CHUNK_SIZE ); | ||
| 130 | if( !r ) | ||
| 131 | return; | 159 | return; |
| 132 | 160 | ||
| 133 | /* re points to low watermark */ | 161 | /* re points to low watermark */ |
| 134 | re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN; | 162 | re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN; |
| 135 | 163 | ||
| 136 | if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) | 164 | if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) |
| 137 | r += sprintf( r, "d5:filesd" ); | 165 | r += sprintf(r, "d5:filesd"); |
| 138 | 166 | ||
| 139 | /* For each bucket... */ | 167 | /* For each bucket... */ |
| 140 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 168 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
| 141 | /* Get exclusive access to that bucket */ | 169 | /* Get exclusive access to that bucket */ |
| 142 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 170 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
| 143 | ot_torrent *torrents = (ot_torrent*)(torrents_list->data); | 171 | ot_torrent *torrents = (ot_torrent *)(torrents_list->data); |
| 144 | size_t i; | 172 | size_t i; |
| 145 | 173 | ||
| 146 | /* For each torrent in this bucket.. */ | 174 | /* For each torrent in this bucket.. */ |
| 147 | for( i=0; i<torrents_list->size; ++i ) { | 175 | for (i = 0; i < torrents_list->size; ++i) { |
| 148 | r = fullscrape_write_one( mode, r, torrents[i].peer_list, &torrents[i].hash ); | 176 | r = fullscrape_write_one(mode, r, torrents + i, &torrents[i].hash); |
| 149 | 177 | ||
| 150 | if( r > re) { | 178 | if (r > re) { |
| 151 | /* Allocate a fresh output buffer at the end of our buffers list */ | 179 | iovector.iov_len = r - (char *)iovector.iov_base; |
| 152 | r = iovec_fix_increase_or_free( iovec_entries, iovector, r, OT_SCRAPE_CHUNK_SIZE ); | 180 | |
| 153 | if( !r ) | 181 | if (mutex_workqueue_pushchunked(taskid, &iovector)) { |
| 154 | return mutex_bucket_unlock( bucket, 0 ); | 182 | free(iovector.iov_base); |
| 183 | return mutex_bucket_unlock(bucket, 0); | ||
| 184 | } | ||
| 185 | /* Allocate a fresh output buffer */ | ||
| 186 | r = iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); | ||
| 187 | if (!r) | ||
| 188 | return mutex_bucket_unlock(bucket, 0); | ||
| 155 | 189 | ||
| 156 | /* re points to low watermark */ | 190 | /* re points to low watermark */ |
| 157 | re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN; | 191 | re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN; |
| @@ -159,125 +193,265 @@ static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tas | |||
| 159 | } | 193 | } |
| 160 | 194 | ||
| 161 | /* All torrents done: release lock on current bucket */ | 195 | /* All torrents done: release lock on current bucket */ |
| 162 | mutex_bucket_unlock( bucket, 0 ); | 196 | mutex_bucket_unlock(bucket, 0); |
| 163 | 197 | ||
| 164 | /* Parent thread died? */ | 198 | /* Parent thread died? */ |
| 165 | if( !g_opentracker_running ) | 199 | if (!g_opentracker_running) |
| 166 | return; | 200 | return; |
| 167 | } | 201 | } |
| 168 | 202 | ||
| 169 | if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) | 203 | if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) |
| 170 | r += sprintf( r, "ee" ); | 204 | r += sprintf(r, "ee"); |
| 171 | 205 | ||
| 172 | /* Release unused memory in current output buffer */ | 206 | /* Send rest of data */ |
| 173 | iovec_fixlast( iovec_entries, iovector, r ); | 207 | iovector.iov_len = r - (char *)iovector.iov_base; |
| 208 | if (mutex_workqueue_pushchunked(taskid, &iovector)) | ||
| 209 | free(iovector.iov_base); | ||
| 174 | } | 210 | } |
| 175 | 211 | ||
| 176 | #ifdef WANT_COMPRESSION_GZIP | 212 | #ifdef WANT_COMPRESSION_GZIP |
| 177 | 213 | ||
| 178 | static void fullscrape_make_gzip( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ) { | 214 | static void fullscrape_make_gzip(int taskid, ot_tasktype mode) { |
| 179 | int bucket; | 215 | int bucket; |
| 180 | char *r; | 216 | char *r; |
| 181 | int zres; | 217 | struct iovec iovector = {NULL, 0}; |
| 182 | z_stream strm; | 218 | int zres; |
| 183 | 219 | z_stream strm; | |
| 184 | /* Setup return vector... */ | 220 | /* Setup return vector... */ |
| 185 | *iovec_entries = 0; | 221 | iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); |
| 186 | *iovector = NULL; | 222 | if (!iovector.iov_base) |
| 187 | r = iovec_increase( iovec_entries, iovector, OT_SCRAPE_CHUNK_SIZE ); | ||
| 188 | if( !r ) | ||
| 189 | return; | 223 | return; |
| 190 | 224 | ||
| 191 | byte_zero( &strm, sizeof(strm) ); | 225 | byte_zero(&strm, sizeof(strm)); |
| 192 | strm.next_out = (uint8_t*)r; | 226 | strm.next_out = (uint8_t *)iovector.iov_base; |
| 193 | strm.avail_out = OT_SCRAPE_CHUNK_SIZE; | 227 | strm.avail_out = OT_SCRAPE_CHUNK_SIZE; |
| 194 | if( deflateInit2(&strm,7,Z_DEFLATED,31,9,Z_DEFAULT_STRATEGY) != Z_OK ) | 228 | if (deflateInit2(&strm, 7, Z_DEFLATED, 31, 9, Z_DEFAULT_STRATEGY) != Z_OK) |
| 195 | fprintf( stderr, "not ok.\n" ); | 229 | fprintf(stderr, "not ok.\n"); |
| 196 | 230 | ||
| 197 | if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) { | 231 | if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) { |
| 198 | strm.next_in = (uint8_t*)"d5:filesd"; | 232 | strm.next_in = (uint8_t *)"d5:filesd"; |
| 199 | strm.avail_in = strlen("d5:filesd"); | 233 | strm.avail_in = strlen("d5:filesd"); |
| 200 | zres = deflate( &strm, Z_NO_FLUSH ); | 234 | zres = deflate(&strm, Z_NO_FLUSH); |
| 201 | } | 235 | } |
| 202 | 236 | ||
| 203 | /* For each bucket... */ | 237 | /* For each bucket... */ |
| 204 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 238 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
| 205 | /* Get exclusive access to that bucket */ | 239 | /* Get exclusive access to that bucket */ |
| 206 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 240 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
| 207 | ot_torrent *torrents = (ot_torrent*)(torrents_list->data); | 241 | ot_torrent *torrents = (ot_torrent *)(torrents_list->data); |
| 208 | size_t i; | 242 | size_t i; |
| 209 | 243 | ||
| 210 | /* For each torrent in this bucket.. */ | 244 | /* For each torrent in this bucket.. */ |
| 211 | for( i=0; i<torrents_list->size; ++i ) { | 245 | for (i = 0; i < torrents_list->size; ++i) { |
| 212 | char compress_buffer[OT_SCRAPE_MAXENTRYLEN]; | 246 | char compress_buffer[OT_SCRAPE_MAXENTRYLEN]; |
| 213 | r = fullscrape_write_one( mode, compress_buffer, torrents[i].peer_list, &torrents[i].hash ); | 247 | r = fullscrape_write_one(mode, compress_buffer, torrents + i, &torrents[i].hash); |
| 214 | strm.next_in = (uint8_t*)compress_buffer; | 248 | strm.next_in = (uint8_t *)compress_buffer; |
| 215 | strm.avail_in = r - compress_buffer; | 249 | strm.avail_in = r - compress_buffer; |
| 216 | zres = deflate( &strm, Z_NO_FLUSH ); | 250 | zres = deflate(&strm, Z_NO_FLUSH); |
| 217 | if( ( zres < Z_OK ) && ( zres != Z_BUF_ERROR ) ) | 251 | if ((zres < Z_OK) && (zres != Z_BUF_ERROR)) |
| 218 | fprintf( stderr, "deflate() failed while in fullscrape_make().\n" ); | 252 | fprintf(stderr, "deflate() failed while in fullscrape_make().\n"); |
| 219 | 253 | ||
| 220 | /* Check if there still is enough buffer left */ | 254 | /* Check if there still is enough buffer left */ |
| 221 | while( !strm.avail_out ) { | 255 | while (!strm.avail_out) { |
| 222 | /* Allocate a fresh output buffer at the end of our buffers list */ | 256 | iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base; |
| 223 | r = iovec_increase( iovec_entries, iovector, OT_SCRAPE_CHUNK_SIZE ); | 257 | |
| 224 | if( !r ) { | 258 | if (mutex_workqueue_pushchunked(taskid, &iovector)) { |
| 225 | fprintf( stderr, "Problem with iovec_fix_increase_or_free\n" ); | 259 | free(iovector.iov_base); |
| 226 | iovec_free( iovec_entries, iovector ); | 260 | return mutex_bucket_unlock(bucket, 0); |
| 261 | } | ||
| 262 | /* Allocate a fresh output buffer */ | ||
| 263 | iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); | ||
| 264 | if (!iovector.iov_base) { | ||
| 265 | fprintf(stderr, "Out of memory trying to claim output buffer\n"); | ||
| 227 | deflateEnd(&strm); | 266 | deflateEnd(&strm); |
| 228 | return mutex_bucket_unlock( bucket, 0 ); | 267 | return mutex_bucket_unlock(bucket, 0); |
| 229 | } | 268 | } |
| 230 | strm.next_out = (uint8_t*)r; | 269 | strm.next_out = (uint8_t *)iovector.iov_base; |
| 231 | strm.avail_out = OT_SCRAPE_CHUNK_SIZE; | 270 | strm.avail_out = OT_SCRAPE_CHUNK_SIZE; |
| 232 | zres = deflate( &strm, Z_NO_FLUSH ); | 271 | zres = deflate(&strm, Z_NO_FLUSH); |
| 233 | if( ( zres < Z_OK ) && ( zres != Z_BUF_ERROR ) ) | 272 | if ((zres < Z_OK) && (zres != Z_BUF_ERROR)) |
| 234 | fprintf( stderr, "deflate() failed while in fullscrape_make().\n" ); | 273 | fprintf(stderr, "deflate() failed while in fullscrape_make().\n"); |
| 235 | } | 274 | } |
| 236 | } | 275 | } |
| 237 | 276 | ||
| 238 | /* All torrents done: release lock on current bucket */ | 277 | /* All torrents done: release lock on current bucket */ |
| 239 | mutex_bucket_unlock( bucket, 0 ); | 278 | mutex_bucket_unlock(bucket, 0); |
| 240 | 279 | ||
| 241 | /* Parent thread died? */ | 280 | /* Parent thread died? */ |
| 242 | if( !g_opentracker_running ) | 281 | if (!g_opentracker_running) { |
| 282 | deflateEnd(&strm); | ||
| 243 | return; | 283 | return; |
| 284 | } | ||
| 244 | } | 285 | } |
| 245 | 286 | ||
| 246 | if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) { | 287 | if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) { |
| 247 | strm.next_in = (uint8_t*)"ee"; | 288 | strm.next_in = (uint8_t *)"ee"; |
| 248 | strm.avail_in = strlen("ee"); | 289 | strm.avail_in = strlen("ee"); |
| 249 | } | 290 | } |
| 250 | 291 | ||
| 251 | if( deflate( &strm, Z_FINISH ) < Z_OK ) | 292 | if (deflate(&strm, Z_FINISH) < Z_OK) |
| 252 | fprintf( stderr, "deflate() failed while in fullscrape_make()'s endgame.\n" ); | 293 | fprintf(stderr, "deflate() failed while in fullscrape_make()'s endgame.\n"); |
| 253 | |||
| 254 | if( !strm.avail_out ) { | ||
| 255 | unsigned int pending; | ||
| 256 | int bits; | ||
| 257 | deflatePending( &strm, &pending, &bits); | ||
| 258 | pending += ( bits ? 1 : 0 ); | ||
| 259 | 294 | ||
| 260 | /* Allocate a fresh output buffer at the end of our buffers list */ | 295 | iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base; |
| 261 | r = iovec_fix_increase_or_free( iovec_entries, iovector, strm.next_out, pending ); | 296 | if (mutex_workqueue_pushchunked(taskid, &iovector)) { |
| 262 | if( !r ) { | 297 | free(iovector.iov_base); |
| 263 | fprintf( stderr, "Problem with iovec_fix_increase_or_free\n" ); | 298 | deflateEnd(&strm); |
| 264 | deflateEnd(&strm); | 299 | return; |
| 265 | return mutex_bucket_unlock( bucket, 0 ); | ||
| 266 | } | ||
| 267 | strm.next_out = (uint8_t*)r; | ||
| 268 | strm.avail_out = pending; | ||
| 269 | if( deflate( &strm, Z_FINISH ) < Z_OK ) | ||
| 270 | fprintf( stderr, "deflate() failed while in fullscrape_make()'s endgame.\n" ); | ||
| 271 | } | 300 | } |
| 272 | 301 | ||
| 273 | /* Release unused memory in current output buffer */ | 302 | /* Check if there's a last batch of data in the zlib buffer */ |
| 274 | iovec_fixlast( iovec_entries, iovector, strm.next_out ); | 303 | if (!strm.avail_out) { |
| 304 | /* Allocate a fresh output buffer */ | ||
| 305 | iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); | ||
| 306 | |||
| 307 | if (!iovector.iov_base) { | ||
| 308 | fprintf(stderr, "Problem with iovec_fix_increase_or_free\n"); | ||
| 309 | deflateEnd(&strm); | ||
| 310 | return; | ||
| 311 | } | ||
| 312 | strm.next_out = iovector.iov_base; | ||
| 313 | strm.avail_out = OT_SCRAPE_CHUNK_SIZE; | ||
| 314 | if (deflate(&strm, Z_FINISH) < Z_OK) | ||
| 315 | fprintf(stderr, "deflate() failed while in fullscrape_make()'s endgame.\n"); | ||
| 316 | |||
| 317 | /* Only pass the new buffer if there actually was some data left in the buffer */ | ||
| 318 | iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base; | ||
| 319 | if (!iovector.iov_len || mutex_workqueue_pushchunked(taskid, &iovector)) | ||
| 320 | free(iovector.iov_base); | ||
| 321 | } | ||
| 275 | 322 | ||
| 276 | deflateEnd(&strm); | 323 | deflateEnd(&strm); |
| 277 | } | 324 | } |
| 278 | /* WANT_COMPRESSION_GZIP */ | 325 | /* WANT_COMPRESSION_GZIP */ |
| 279 | #endif | 326 | #endif |
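
The double endgame above follows from zlib's flush contract: deflate() with Z_FINISH only signals completion by returning Z_STREAM_END, and if it ran out of output space (strm.avail_out == 0) it may still hold compressed bytes internally, so it has to be called once more with a fresh buffer before the stream is really finished.
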
| 280 | 327 | ||
| 328 | #ifdef WANT_COMPRESSION_ZSTD | ||
| 329 | |||
| 330 | static void fullscrape_make_zstd(int taskid, ot_tasktype mode) { | ||
| 331 | int bucket; | ||
| 332 | char *r; | ||
| 333 | struct iovec iovector = {NULL, 0}; | ||
| 334 | ZSTD_CCtx *zstream = ZSTD_createCCtx(); | ||
| 335 | ZSTD_inBuffer inbuf; | ||
| 336 | ZSTD_outBuffer outbuf; | ||
| 337 | size_t more_bytes; | ||
| 338 | |||
| 339 | if (!zstream) | ||
| 340 | return; | ||
| 341 | |||
| 342 | /* Setup return vector... */ | ||
| 343 | iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); | ||
| 344 | if (!iovector.iov_base) { | ||
| 345 | ZSTD_freeCCtx(zstream); | ||
| 346 | return; | ||
| 347 | } | ||
| 348 | |||
| 349 | /* Working with a compression level 6 is half as fast as level 3, but | ||
| 350 | seems to be the last reasonable bump that's worth extra cpu */ | ||
| 351 | ZSTD_CCtx_setParameter(zstream, ZSTD_c_compressionLevel, 6); | ||
| 352 | |||
| 353 | outbuf.dst = iovector.iov_base; | ||
| 354 | outbuf.size = OT_SCRAPE_CHUNK_SIZE; | ||
| 355 | outbuf.pos = 0; | ||
| 356 | |||
| 357 | if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) { | ||
| 358 | inbuf.src = (const void *)"d5:filesd"; | ||
| 359 | inbuf.size = strlen("d5:filesd"); | ||
| 360 | inbuf.pos = 0; | ||
| 361 | ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_continue); | ||
| 362 | } | ||
| 363 | |||
| 364 | /* For each bucket... */ | ||
| 365 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { | ||
| 366 | /* Get exclusive access to that bucket */ | ||
| 367 | ot_vector *torrents_list = mutex_bucket_lock(bucket); | ||
| 368 | ot_torrent *torrents = (ot_torrent *)(torrents_list->data); | ||
| 369 | size_t i; | ||
| 370 | |||
| 371 | /* For each torrent in this bucket.. */ | ||
| 372 | for (i = 0; i < torrents_list->size; ++i) { | ||
| 373 | char compress_buffer[OT_SCRAPE_MAXENTRYLEN]; | ||
| 374 | r = fullscrape_write_one(mode, compress_buffer, torrents + i, &torrents[i].hash); | ||
| 375 | inbuf.src = compress_buffer; | ||
| 376 | inbuf.size = r - compress_buffer; | ||
| 377 | inbuf.pos = 0; | ||
| 378 | ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_continue); | ||
| 379 | |||
| 380 | /* Check if there still is enough buffer left */ | ||
| 381 | while (outbuf.pos + OT_SCRAPE_MAXENTRYLEN > outbuf.size) { | ||
| 382 | iovector.iov_len = outbuf.size; | ||
| 383 | |||
| 384 | if (mutex_workqueue_pushchunked(taskid, &iovector)) { | ||
| 385 | free(iovector.iov_base); | ||
| 386 | ZSTD_freeCCtx(zstream); | ||
| 387 | return mutex_bucket_unlock(bucket, 0); | ||
| 388 | } | ||
| 389 | /* Allocate a fresh output buffer */ | ||
| 390 | iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); | ||
| 391 | if (!iovector.iov_base) { | ||
| 392 | fprintf(stderr, "Out of memory trying to claim output buffer\n"); | ||
| 393 | ZSTD_freeCCtx(zstream); | ||
| 394 | return mutex_bucket_unlock(bucket, 0); | ||
| 395 | } | ||
| 396 | |||
| 397 | outbuf.dst = iovector.iov_base; | ||
| 398 | outbuf.size = OT_SCRAPE_CHUNK_SIZE; | ||
| 399 | outbuf.pos = 0; | ||
| 400 | |||
| 401 | ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_continue); | ||
| 402 | } | ||
| 403 | } | ||
| 404 | |||
| 405 | /* All torrents done: release lock on current bucket */ | ||
| 406 | mutex_bucket_unlock(bucket, 0); | ||
| 407 | |||
| 408 | /* Parent thread died? */ | ||
| 409 | if (!g_opentracker_running) | ||
| 410 | return; | ||
| 411 | } | ||
| 412 | |||
| 413 | if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) { | ||
| 414 | inbuf.src = (const void *)"ee"; | ||
| 415 | inbuf.size = strlen("ee"); | ||
| 416 | inbuf.pos = 0; | ||
| 417 | } | ||
| 418 | |||
| 419 | more_bytes = ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_end); | ||
| 420 | |||
| 421 | iovector.iov_len = outbuf.pos; | ||
| 422 | if (mutex_workqueue_pushchunked(taskid, &iovector)) { | ||
| 423 | free(iovector.iov_base); | ||
| 424 | ZSTD_freeCCtx(zstream); | ||
| 425 | return; | ||
| 426 | } | ||
| 427 | |||
| 428 | /* Check if there's a last batch of data in the zstd buffer */ | ||
| 429 | if (more_bytes) { | ||
| 430 | /* Allocate a fresh output buffer */ | ||
| 431 | iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE); | ||
| 432 | |||
| 433 | if (!iovector.iov_base) { | ||
| 434 | fprintf(stderr, "Problem with iovec_fix_increase_or_free\n"); | ||
| 435 | ZSTD_freeCCtx(zstream); | ||
| 436 | return; | ||
| 437 | } | ||
| 438 | |||
| 439 | outbuf.dst = iovector.iov_base; | ||
| 440 | outbuf.size = OT_SCRAPE_CHUNK_SIZE; | ||
| 441 | outbuf.pos = 0; | ||
| 442 | |||
| 443 | ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_end); | ||
| 444 | |||
| 445 | /* Only pass the new buffer if there actually was some data left in the buffer */ | ||
| 446 | iovector.iov_len = outbuf.pos; | ||
| 447 | if (!iovector.iov_len || mutex_workqueue_pushchunked(taskid, &iovector)) | ||
| 448 | free(iovector.iov_base); | ||
| 449 | } | ||
| 450 | |||
| 451 | ZSTD_freeCCtx(zstream); | ||
| 452 | } | ||
| 453 | /* WANT_COMPRESSION_ZSTD */ | ||
| 454 | #endif | ||
| 455 | |||
| 281 | /* WANT_FULLSCRAPE */ | 456 | /* WANT_FULLSCRAPE */ |
| 282 | #endif | 457 | #endif |
| 283 | const char *g_version_fullscrape_c = "$Source$: $Revision$\n"; | ||
diff --git a/ot_fullscrape.h b/ot_fullscrape.h index 0f920ec..bbb2a3f 100644 --- a/ot_fullscrape.h +++ b/ot_fullscrape.h | |||
| @@ -8,9 +8,11 @@ | |||
| 8 | 8 | ||
| 9 | #ifdef WANT_FULLSCRAPE | 9 | #ifdef WANT_FULLSCRAPE |
| 10 | 10 | ||
| 11 | void fullscrape_init( ); | 11 | #include "ot_mutex.h" |
| 12 | void fullscrape_deinit( ); | 12 | |
| 13 | void fullscrape_deliver( int64 sock, ot_tasktype tasktype ); | 13 | void fullscrape_init(); |
| 14 | void fullscrape_deinit(); | ||
| 15 | void fullscrape_deliver(int64 sock, ot_tasktype tasktype); | ||
| 14 | 16 | ||
| 15 | #else | 17 | #else |
| 16 | 18 | ||
diff --git a/ot_http.c b/ot_http.c --- a/ot_http.c +++ b/ot_http.c | |||
| @@ -4,531 +4,653 @@ | |||
| 4 | $id$ */ | 4 | $id$ */ |
| 5 | 5 | ||
| 6 | /* System */ | 6 | /* System */ |
| 7 | #include <sys/types.h> | 7 | #define _GNU_SOURCE |
| 8 | #include <arpa/inet.h> | 8 | #include <arpa/inet.h> |
| 9 | #include <stdlib.h> | 9 | #include <pthread.h> |
| 10 | #include <stdio.h> | 10 | #include <stdio.h> |
| 11 | #include <stdlib.h> | ||
| 11 | #include <string.h> | 12 | #include <string.h> |
| 13 | #include <sys/types.h> | ||
| 12 | #include <unistd.h> | 14 | #include <unistd.h> |
| 13 | #include <pthread.h> | ||
| 14 | 15 | ||
| 15 | /* Libowfat */ | 16 | /* Libowfat */ |
| 16 | #include "byte.h" | ||
| 17 | #include "array.h" | 17 | #include "array.h" |
| 18 | #include "byte.h" | ||
| 19 | #include "case.h" | ||
| 18 | #include "iob.h" | 20 | #include "iob.h" |
| 19 | #include "ip6.h" | 21 | #include "ip6.h" |
| 20 | #include "scan.h" | 22 | #include "scan.h" |
| 21 | #include "case.h" | ||
| 22 | 23 | ||
| 23 | /* Opentracker */ | 24 | /* Opentracker */ |
| 24 | #include "trackerlogic.h" | 25 | #include "ot_accesslist.h" |
| 25 | #include "ot_mutex.h" | 26 | #include "ot_fullscrape.h" |
| 26 | #include "ot_http.h" | 27 | #include "ot_http.h" |
| 27 | #include "ot_iovec.h" | 28 | #include "ot_iovec.h" |
| 28 | #include "scan_urlencoded_query.h" | 29 | #include "ot_mutex.h" |
| 29 | #include "ot_fullscrape.h" | ||
| 30 | #include "ot_stats.h" | 30 | #include "ot_stats.h" |
| 31 | #include "ot_accesslist.h" | 31 | #include "scan_urlencoded_query.h" |
| 32 | #include "trackerlogic.h" | ||
| 33 | |||
| 34 | #ifdef WANT_NO_AUTO_FREE | ||
| 35 | #define OT_IOB_INIT(B) (bzero(B, sizeof(io_batch)), 0) | ||
| 36 | #else | ||
| 37 | #define OT_IOB_INIT(B) iob_init_autofree(B, 0) | ||
| 38 | #endif | ||
| 32 | 39 | ||
| 33 | #define OT_MAXMULTISCRAPE_COUNT 64 | 40 | #define OT_MAXMULTISCRAPE_COUNT 64 |
| 34 | #define OT_BATCH_LIMIT (1024*1024*16) | 41 | #define OT_BATCH_LIMIT (1024 * 1024 * 16) |
| 35 | extern char *g_redirecturl; | 42 | extern char *g_redirecturl; |
| 36 | 43 | ||
| 37 | char *g_stats_path; | 44 | char *g_stats_path; |
| 38 | ssize_t g_stats_path_len; | 45 | ssize_t g_stats_path_len; |
| 39 | 46 | ||
| 40 | enum { | 47 | enum { SUCCESS_HTTP_HEADER_LENGTH = 80, SUCCESS_HTTP_SIZE_OFF = 17 }; |
| 41 | SUCCESS_HTTP_HEADER_LENGTH = 80, | ||
| 42 | SUCCESS_HTTP_HEADER_LENGTH_CONTENT_ENCODING = 32, | ||
| 43 | SUCCESS_HTTP_SIZE_OFF = 17 }; | ||
| 44 | 48 | ||
| 45 | static void http_senddata( const int64 sock, struct ot_workstruct *ws ) { | 49 | static void http_senddata(const int64 sock, struct ot_workstruct *ws) { |
| 46 | struct http_data *cookie = io_getcookie( sock ); | 50 | struct http_data *cookie = io_getcookie(sock); |
| 47 | ssize_t written_size; | 51 | ssize_t written_size; |
| 48 | 52 | ||
| 49 | if( !cookie ) { io_close(sock); return; } | 53 | if (!cookie) { |
| 54 | io_close(sock); | ||
| 55 | return; | ||
| 56 | } | ||
| 50 | 57 | ||
| 51 | /* whoever sends data is not interested in its input-array */ | 58 | /* whoever sends data is not interested in its input-array */ |
| 52 | if( ws->keep_alive && ws->header_size != ws->request_size ) { | 59 | if (ws->keep_alive && ws->header_size != ws->request_size) { |
| 53 | size_t rest = ws->request_size - ws->header_size; | 60 | size_t rest = ws->request_size - ws->header_size; |
| 54 | if( array_start(&cookie->request) ) { | 61 | if (array_start(&cookie->request)) { |
| 55 | memmove( array_start(&cookie->request), ws->request + ws->header_size, rest ); | 62 | memmove(array_start(&cookie->request), ws->request + ws->header_size, rest); |
| 56 | array_truncate( &cookie->request, 1, rest ); | 63 | array_truncate(&cookie->request, 1, rest); |
| 57 | } else | 64 | } else |
| 58 | array_catb(&cookie->request, ws->request + ws->header_size, rest ); | 65 | array_catb(&cookie->request, ws->request + ws->header_size, rest); |
| 59 | } else | 66 | } else |
| 60 | array_reset( &cookie->request ); | 67 | array_reset(&cookie->request); |
| 61 | 68 | ||
| 62 | written_size = write( sock, ws->reply, ws->reply_size ); | 69 | written_size = write(sock, ws->reply, ws->reply_size); |
| 63 | if( ( written_size < 0 ) || ( ( written_size == ws->reply_size ) && !ws->keep_alive ) ) { | 70 | if ((written_size < 0) || ((written_size == ws->reply_size) && !ws->keep_alive)) { |
| 64 | array_reset( &cookie->request ); | 71 | array_reset(&cookie->request); |
| 65 | free( cookie ); io_close( sock ); return; | 72 | free(cookie); |
| 73 | io_close(sock); | ||
| 74 | return; | ||
| 66 | } | 75 | } |
| 67 | 76 | ||
| 68 | if( written_size < ws->reply_size ) { | 77 | if (written_size < ws->reply_size) { |
| 69 | char * outbuf; | 78 | char *outbuf; |
| 70 | tai6464 t; | 79 | tai6464 t; |
| 71 | 80 | ||
| 72 | if( !( outbuf = malloc( ws->reply_size - written_size ) ) ) { | 81 | if (!(outbuf = malloc(ws->reply_size - written_size))) { |
| 73 | array_reset( &cookie->request ); | 82 | array_reset(&cookie->request); |
| 74 | free(cookie); io_close( sock ); | 83 | free(cookie); |
| 84 | io_close(sock); | ||
| 75 | return; | 85 | return; |
| 76 | } | 86 | } |
| 77 | 87 | ||
| 78 | memcpy( outbuf, ws->reply + written_size, ws->reply_size - written_size ); | 88 | memcpy(outbuf, ws->reply + written_size, ws->reply_size - written_size); |
| 79 | if ( !cookie->batch ) { | 89 | if (!cookie->batch) { |
| 80 | cookie->batch = malloc( sizeof(io_batch) ); | 90 | cookie->batch = malloc(sizeof(io_batch)); |
| 81 | memset( cookie->batch, 0, sizeof(io_batch) ); | 91 | if (!cookie->batch || OT_IOB_INIT(cookie->batch) == -1) { |
| 82 | cookie->batches = 1; | 92 | free(cookie->batch); |
| 93 | free(outbuf); | ||
| 94 | array_reset(&cookie->request); | ||
| 95 | free(cookie); | ||
| 96 | io_close(sock); | ||
| 97 | return; | ||
| 98 | } | ||
| 99 | |||
| 100 | cookie->batches = 1; | ||
| 83 | } | 101 | } |
| 84 | 102 | ||
| 85 | iob_addbuf_free( cookie->batch, outbuf, ws->reply_size - written_size ); | 103 | iob_addbuf_free(cookie->batch, outbuf, ws->reply_size - written_size); |
| 86 | 104 | ||
| 87 | /* writeable short data sockets just have a tcp timeout */ | 105 | /* writeable short data sockets just have a tcp timeout */ |
| 88 | if( !ws->keep_alive ) { | 106 | if (!ws->keep_alive) { |
| 89 | taia_uint( &t, 0 ); io_timeout( sock, t ); | 107 | taia_uint(&t, 0); |
| 90 | io_dontwantread( sock ); | 108 | io_timeout(sock, t); |
| 109 | io_dontwantread(sock); | ||
| 91 | } | 110 | } |
| 92 | io_wantwrite( sock ); | 111 | io_wantwrite(sock); |
| 93 | } | 112 | } |
| 94 | } | 113 | } |
| 95 | 114 | ||
| 96 | #define HTTPERROR_302 return http_issue_error( sock, ws, CODE_HTTPERROR_302 ) | 115 | #define HTTPERROR_302 return http_issue_error(sock, ws, CODE_HTTPERROR_302) |
| 97 | #define HTTPERROR_400 return http_issue_error( sock, ws, CODE_HTTPERROR_400 ) | 116 | #define HTTPERROR_400 return http_issue_error(sock, ws, CODE_HTTPERROR_400) |
| 98 | #define HTTPERROR_400_PARAM return http_issue_error( sock, ws, CODE_HTTPERROR_400_PARAM ) | 117 | #define HTTPERROR_400_PARAM return http_issue_error(sock, ws, CODE_HTTPERROR_400_PARAM) |
| 99 | #define HTTPERROR_400_COMPACT return http_issue_error( sock, ws, CODE_HTTPERROR_400_COMPACT ) | 118 | #define HTTPERROR_400_COMPACT return http_issue_error(sock, ws, CODE_HTTPERROR_400_COMPACT) |
| 100 | #define HTTPERROR_400_DOUBLEHASH return http_issue_error( sock, ws, CODE_HTTPERROR_400_PARAM ) | 119 | #define HTTPERROR_400_DOUBLEHASH return http_issue_error(sock, ws, CODE_HTTPERROR_400_PARAM) |
| 101 | #define HTTPERROR_402_NOTMODEST return http_issue_error( sock, ws, CODE_HTTPERROR_402_NOTMODEST ) | 120 | #define HTTPERROR_402_NOTMODEST return http_issue_error(sock, ws, CODE_HTTPERROR_402_NOTMODEST) |
| 102 | #define HTTPERROR_403_IP return http_issue_error( sock, ws, CODE_HTTPERROR_403_IP ) | 121 | #define HTTPERROR_403_IP return http_issue_error(sock, ws, CODE_HTTPERROR_403_IP) |
| 103 | #define HTTPERROR_404 return http_issue_error( sock, ws, CODE_HTTPERROR_404 ) | 122 | #define HTTPERROR_404 return http_issue_error(sock, ws, CODE_HTTPERROR_404) |
| 104 | #define HTTPERROR_500 return http_issue_error( sock, ws, CODE_HTTPERROR_500 ) | 123 | #define HTTPERROR_500 return http_issue_error(sock, ws, CODE_HTTPERROR_500) |
| 105 | ssize_t http_issue_error( const int64 sock, struct ot_workstruct *ws, int code ) { | 124 | ssize_t http_issue_error(const int64 sock, struct ot_workstruct *ws, int code) { |
| 106 | char *error_code[] = { "302 Found", "400 Invalid Request", "400 Invalid Request", "400 Invalid Request", "402 Payment Required", | 125 | char *error_code[] = {"302 Found", "400 Invalid Request", "400 Invalid Request", "400 Invalid Request", "402 Payment Required", |
| 107 | "403 Not Modest", "403 Access Denied", "404 Not Found", "500 Internal Server Error" }; | 126 | "403 Not Modest", "403 Access Denied", "404 Not Found", "500 Internal Server Error"}; |
| 108 | char *title = error_code[code]; | 127 | char *title = error_code[code]; |
| 109 | 128 | ||
| 110 | ws->reply = ws->outbuf; | 129 | ws->reply = ws->outbuf; |
| 111 | if( code == CODE_HTTPERROR_302 ) | 130 | if (code == CODE_HTTPERROR_302) |
| 112 | ws->reply_size = snprintf( ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 302 Found\r\nContent-Length: 0\r\nLocation: %s\r\n\r\n", g_redirecturl ); | 131 | ws->reply_size = snprintf(ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 302 Found\r\nContent-Length: 0\r\nLocation: %s\r\n\r\n", g_redirecturl); |
| 113 | else | 132 | else |
| 114 | ws->reply_size = snprintf( ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 %s\r\nContent-Type: text/html\r\nContent-Length: %zd\r\n\r\n<title>%s</title>\n", title, strlen(title)+16-4,title+4); | 133 | ws->reply_size = snprintf(ws->reply, G_OUTBUF_SIZE, "HTTP/1.0 %s\r\nContent-Type: text/html\r\nContent-Length: %zd\r\n\r\n<title>%s</title>\n", title, |
| 134 | strlen(title) + 16 - 4, title + 4); | ||
| 115 | 135 | ||
| 116 | #ifdef _DEBUG_HTTPERROR | 136 | #ifdef _DEBUG_HTTPERROR |
| 117 | fprintf( stderr, "DEBUG: invalid request was: %s\n", ws->debugbuf ); | 137 | fprintf(stderr, "DEBUG: invalid request was: %s\n", ws->debugbuf); |
| 118 | #endif | 138 | #endif |
| 119 | stats_issue_event( EVENT_FAILED, FLAG_TCP, code ); | 139 | stats_issue_event(EVENT_FAILED, FLAG_TCP, code); |
| 120 | http_senddata( sock, ws ); | 140 | http_senddata(sock, ws); |
| 121 | return ws->reply_size = -2; | 141 | return ws->reply_size = -2; |
| 122 | } | 142 | } |
| 123 | 143 | ||
| 124 | ssize_t http_sendiovecdata( const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector ) { | 144 | ssize_t http_sendiovecdata(const int64 sock, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial) { |
| 125 | struct http_data *cookie = io_getcookie( sock ); | 145 | struct http_data *cookie = io_getcookie(sock); |
| 126 | char *header; | 146 | io_batch *current; |
| 127 | int i; | 147 | char *header; |
| 128 | size_t header_size, size = iovec_length( &iovec_entries, (const struct iovec **)&iovector ); | 148 | const char *encoding = ""; |
| 129 | tai6464 t; | 149 | int i; |
| 150 | size_t header_size, size = iovec_length(&iovec_entries, (const struct iovec **)&iovector); | ||
| 151 | tai6464 t; | ||
| 130 | 152 | ||
| 131 | /* No cookie? Bad socket. Leave. */ | 153 | /* No cookie? Bad socket. Leave. */ |
| 132 | if( !cookie ) { | 154 | if (!cookie) { |
| 133 | iovec_free( &iovec_entries, &iovector ); | 155 | iovec_free(&iovec_entries, &iovector); |
| 134 | HTTPERROR_500; | 156 | HTTPERROR_500; |
| 135 | } | 157 | } |
| 136 | 158 | ||
| 137 | /* If this socket collected request in a buffer, free it now */ | 159 | /* If this socket collected request in a buffer, free it now */ |
| 138 | array_reset( &cookie->request ); | 160 | array_reset(&cookie->request); |
| 139 | 161 | ||
| 140 | /* If we came here, wait for the answer is over */ | 162 | /* If we came here, wait for the answer is over */ |
| 141 | cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK; | 163 | if (cookie->flag & STRUCT_HTTP_FLAG_WAITINGFORTASK) { |
| 142 | 164 | io_dontwantread(sock); | |
| 143 | /* Our answers never are 0 vectors. Return an error. */ | 165 | cookie->flag &= ~STRUCT_HTTP_FLAG_WAITINGFORTASK; |
| 144 | if( !iovec_entries ) { | ||
| 145 | HTTPERROR_500; | ||
| 146 | } | 166 | } |
| 147 | 167 | ||
| 148 | /* Prepare space for http header */ | 168 | if (iovec_entries) { |
| 149 | header = malloc( SUCCESS_HTTP_HEADER_LENGTH + SUCCESS_HTTP_HEADER_LENGTH_CONTENT_ENCODING ); | 169 | |
| 150 | if( !header ) { | 170 | if (cookie->flag & STRUCT_HTTP_FLAG_ZSTD) |
| 151 | iovec_free( &iovec_entries, &iovector ); | 171 | encoding = "Content-Encoding: zstd\r\n"; |
| 152 | HTTPERROR_500; | 172 | else if (cookie->flag & STRUCT_HTTP_FLAG_GZIP) |
| 153 | } | 173 | encoding = "Content-Encoding: gzip\r\n"; |
| 154 | 174 | else if (cookie->flag & STRUCT_HTTP_FLAG_BZIP2) | |
| 155 | if( cookie->flag & STRUCT_HTTP_FLAG_GZIP ) | 175 | encoding = "Content-Encoding: bzip2\r\n"; |
| 156 | header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: gzip\r\nContent-Length: %zd\r\n\r\n", size ); | 176 | |
| 157 | else if( cookie->flag & STRUCT_HTTP_FLAG_BZIP2 ) | 177 | if (!(cookie->flag & STRUCT_HTTP_FLAG_CHUNKED)) |
| 158 | header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Encoding: bzip2\r\nContent-Length: %zd\r\n\r\n", size ); | 178 | header_size = asprintf(&header, "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\n%sContent-Length: %zd\r\n\r\n", encoding, size); |
| 159 | else | 179 | else { |
| 160 | header_size = sprintf( header, "HTTP/1.0 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r\n", size ); | 180 | if (!(cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER)) { |
| 181 | header_size = | ||
| 182 | asprintf(&header, "HTTP/1.1 200 OK\r\nContent-Type: application/octet-stream\r\n%sTransfer-Encoding: chunked\r\n\r\n%zx\r\n", encoding, size); | ||
| 183 | cookie->flag |= STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER; | ||
| 184 | } else | ||
| 185 | header_size = asprintf(&header, "%zx\r\n", size); | ||
| 186 | } | ||
| 187 | if (!header) { | ||
| 188 | iovec_free(&iovec_entries, &iovector); | ||
| 189 | HTTPERROR_500; | ||
| 190 | } | ||
| 161 | 191 | ||
| 162 | if (!cookie->batch ) { | 192 | if (!cookie->batch) { |
| 163 | cookie->batch = malloc( sizeof(io_batch) ); | 193 | cookie->batch = malloc(sizeof(io_batch)); |
| 164 | memset( cookie->batch, 0, sizeof(io_batch) ); | 194 | if (!cookie->batch || OT_IOB_INIT(cookie->batch) == -1) { |
| 165 | cookie->batches = 1; | 195 | free(cookie->batch); |
| 166 | } | 196 | free(header); |
| 167 | iob_addbuf_free( cookie->batch, header, header_size ); | 197 | iovec_free(&iovec_entries, &iovector); |
| 168 | 198 | HTTPERROR_500; | |
| 169 | /* Split huge iovectors into separate io_batches */ | 199 | } |
| 170 | for( i=0; i<iovec_entries; ++i ) { | 200 | cookie->batches = 1; |
| 171 | io_batch *current = cookie->batch + cookie->batches - 1; | 201 | } |
| 172 | 202 | current = cookie->batch + cookie->batches - 1; | |
| 173 | /* If the current batch's limit is reached, try to reallocate a new batch to work on */ | 203 | iob_addbuf_free(current, header, header_size); |
| 174 | if( current->bytesleft > OT_BATCH_LIMIT ) { | 204 | |
| 175 | io_batch * new_batch = realloc( current, (cookie->batches + 1) * sizeof(io_batch) ); | 205 | /* Split huge iovectors into separate io_batches */ |
| 176 | if( new_batch ) { | 206 | for (i = 0; i < iovec_entries; ++i) { |
| 177 | cookie->batches++; | 207 | /* If the current batch's limit is reached, try to reallocate a new batch to work on */ |
| 178 | current = cookie->batch = new_batch; | 208 | if (current->bytesleft > OT_BATCH_LIMIT) { |
| 179 | memset( current, 0, sizeof(io_batch) ); | 209 | io_batch *new_batch = realloc(cookie->batch, (cookie->batches + 1) * sizeof(io_batch)); |
| 210 | if (new_batch) { | ||
| 211 | cookie->batch = new_batch; | ||
| 212 | if (OT_IOB_INIT(current) != -1) | ||
| 213 | current = cookie->batch + cookie->batches++; | ||
| 180 | } | 214 | } |
| 215 | } | ||
| 216 | iob_addbuf_free(current, iovector[i].iov_base, iovector[i].iov_len); | ||
| 181 | } | 217 | } |
| 218 | free(iovector); | ||
| 219 | if (cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER) | ||
| 220 | iob_addbuf(current, "\r\n", 2); | ||
| 221 | } | ||
| 182 | 222 | ||
| 183 | iob_addbuf_free( current, iovector[i].iov_base, iovector[i].iov_len ); | 223 | if ((cookie->flag & STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER) && cookie->batch && !is_partial) { |
| 224 | current = cookie->batch + cookie->batches - 1; | ||
| 225 | iob_addbuf(current, "0\r\n\r\n", 5); | ||
| 226 | cookie->flag &= ~STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER; | ||
| 184 | } | 227 | } |
| 185 | free( iovector ); | ||
| 186 | 228 | ||
| 187 | /* writeable sockets timeout after 10 minutes */ | 229 | /* writeable sockets timeout after 10 minutes */ |
| 188 | taia_now( &t ); taia_addsec( &t, &t, OT_CLIENT_TIMEOUT_SEND ); | 230 | taia_now(&t); |
| 189 | io_timeout( sock, t ); | 231 | taia_addsec(&t, &t, OT_CLIENT_TIMEOUT_SEND); |
| 190 | io_dontwantread( sock ); | 232 | io_timeout(sock, t); |
| 191 | io_wantwrite( sock ); | 233 | io_wantwrite(sock); |
| 192 | return 0; | 234 | return 0; |
| 193 | } | 235 | } |
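For readers new to the chunked path introduced above, a toy, self-contained illustration (not tracker code) of the wire format those batches produce: each delivery is framed as a hex size, CRLF, payload, CRLF, and the final call with is_partial == 0 appends the zero-length chunk that terminates an HTTP/1.1 chunked body.

    #include <stdio.h>

    /* Emit one chunk the way http_sendiovecdata frames a partial batch. */
    static void emit_chunk(FILE *out, const char *data, size_t len) {
      fprintf(out, "%zx\r\n", len);   /* chunk size in hex */
      fwrite(data, 1, len, out);      /* chunk payload */
      fputs("\r\n", out);             /* chunk trailer */
    }

    int main(void) {
      fputs("HTTP/1.1 200 OK\r\nContent-Type: application/octet-stream\r\n"
            "Transfer-Encoding: chunked\r\n\r\n", stdout);
      emit_chunk(stdout, "d5:files", 8);  /* first partial delivery */
      emit_chunk(stdout, "dee", 3);       /* last partial delivery */
      fputs("0\r\n\r\n", stdout);         /* is_partial == 0: end of body */
      return 0;
    }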
| 194 | 236 | ||
| 195 | static ssize_t http_handle_stats( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) { | 237 | static ssize_t http_handle_stats(const int64 sock, struct ot_workstruct *ws, char *read_ptr) { |
| 196 | static const ot_keywords keywords_main[] = | 238 | static const ot_keywords keywords_main[] = {{"mode", 1}, {"format", 2}, {"info_hash", 3}, {NULL, -3}}; |
| 197 | { { "mode", 1 }, {"format", 2 }, {"info_hash", 3}, { NULL, -3 } }; | 239 | static const ot_keywords keywords_mode[] = {{"peer", TASK_STATS_PEERS}, |
| 198 | static const ot_keywords keywords_mode[] = | 240 | {"conn", TASK_STATS_CONNS}, |
| 199 | { { "peer", TASK_STATS_PEERS }, { "conn", TASK_STATS_CONNS }, { "scrp", TASK_STATS_SCRAPE }, { "udp4", TASK_STATS_UDP }, { "tcp4", TASK_STATS_TCP }, | 241 | {"scrp", TASK_STATS_SCRAPE}, |
| 200 | { "busy", TASK_STATS_BUSY_NETWORKS }, { "torr", TASK_STATS_TORRENTS }, { "fscr", TASK_STATS_FULLSCRAPE }, | 242 | {"udp4", TASK_STATS_UDP}, |
| 201 | { "s24s", TASK_STATS_SLASH24S }, { "tpbs", TASK_STATS_TPB }, { "herr", TASK_STATS_HTTPERRORS }, { "completed", TASK_STATS_COMPLETED }, | 243 | {"tcp4", TASK_STATS_TCP}, |
| 202 | { "top100", TASK_STATS_TOP100 }, { "top10", TASK_STATS_TOP10 }, { "renew", TASK_STATS_RENEW }, { "syncs", TASK_STATS_SYNCS }, { "version", TASK_STATS_VERSION }, | 244 | {"busy", TASK_STATS_BUSY_NETWORKS}, |
| 203 | { "everything", TASK_STATS_EVERYTHING }, { "statedump", TASK_FULLSCRAPE_TRACKERSTATE }, { "fulllog", TASK_STATS_FULLLOG }, | 245 | {"torr", TASK_STATS_TORRENTS}, |
| 204 | { "woodpeckers", TASK_STATS_WOODPECKERS}, | 246 | {"fscr", TASK_STATS_FULLSCRAPE}, |
| 247 | {"s24s", TASK_STATS_SLASH24S}, | ||
| 248 | {"tpbs", TASK_STATS_TPB}, | ||
| 249 | {"herr", TASK_STATS_HTTPERRORS}, | ||
| 250 | {"completed", TASK_STATS_COMPLETED}, | ||
| 251 | {"top100", TASK_STATS_TOP100}, | ||
| 252 | {"top10", TASK_STATS_TOP10}, | ||
| 253 | {"renew", TASK_STATS_RENEW}, | ||
| 254 | {"syncs", TASK_STATS_SYNCS}, | ||
| 255 | {"version", TASK_STATS_VERSION}, | ||
| 256 | {"everything", TASK_STATS_EVERYTHING}, | ||
| 257 | {"statedump", TASK_FULLSCRAPE_TRACKERSTATE}, | ||
| 258 | {"fulllog", TASK_STATS_FULLLOG}, | ||
| 259 | {"woodpeckers", TASK_STATS_WOODPECKERS}, | ||
| 205 | #ifdef WANT_LOG_NUMWANT | 260 | #ifdef WANT_LOG_NUMWANT |
| 206 | { "numwants", TASK_STATS_NUMWANTS}, | 261 | {"numwants", TASK_STATS_NUMWANTS}, |
| 207 | #endif | 262 | #endif |
| 208 | { NULL, -3 } }; | 263 | {NULL, -3}}; |
| 209 | static const ot_keywords keywords_format[] = | 264 | static const ot_keywords keywords_format[] = {{"bin", TASK_FULLSCRAPE_TPB_BINARY}, {"ben", TASK_FULLSCRAPE}, |
| 210 | { { "bin", TASK_FULLSCRAPE_TPB_BINARY }, { "ben", TASK_FULLSCRAPE }, { "url", TASK_FULLSCRAPE_TPB_URLENCODED }, | 265 | {"url", TASK_FULLSCRAPE_TPB_URLENCODED}, {"txt", TASK_FULLSCRAPE_TPB_ASCII}, |
| 211 | { "txt", TASK_FULLSCRAPE_TPB_ASCII }, { "txtp", TASK_FULLSCRAPE_TPB_ASCII_PLUS }, { NULL, -3 } }; | 266 | {"txtp", TASK_FULLSCRAPE_TPB_ASCII_PLUS}, {NULL, -3}}; |
| 212 | 267 | ||
| 213 | int mode = TASK_STATS_PEERS, scanon = 1, format = 0; | 268 | int mode = TASK_STATS_PEERS, scanon = 1, format = 0; |
| 214 | 269 | ||
| 215 | #ifdef WANT_RESTRICT_STATS | 270 | #ifdef WANT_RESTRICT_STATS |
| 216 | struct http_data *cookie = io_getcookie( sock ); | 271 | struct http_data *cookie = io_getcookie(sock); |
| 217 | 272 | ||
| 218 | if( !cookie || !accesslist_is_blessed( cookie->ip, OT_PERMISSION_MAY_STAT ) ) | 273 | if (!cookie || !accesslist_is_blessed(cookie->ip, OT_PERMISSION_MAY_STAT)) |
| 219 | HTTPERROR_403_IP; | 274 | HTTPERROR_403_IP; |
| 220 | #endif | 275 | #endif |
| 221 | 276 | ||
| 222 | while( scanon ) { | 277 | while (scanon) { |
| 223 | switch( scan_find_keywords( keywords_main, &read_ptr, SCAN_SEARCHPATH_PARAM ) ) { | 278 | switch (scan_find_keywords(keywords_main, &read_ptr, SCAN_SEARCHPATH_PARAM)) { |
| 224 | case -2: scanon = 0; break; /* TERMINATOR */ | 279 | case -2: |
| 225 | case -1: HTTPERROR_400_PARAM; /* PARSE ERROR */ | 280 | scanon = 0; |
| 226 | case -3: scan_urlencoded_skipvalue( &read_ptr ); break; | 281 | break; /* TERMINATOR */ |
| 227 | case 1: /* matched "mode" */ | 282 | case -1: |
| 228 | if( ( mode = scan_find_keywords( keywords_mode, &read_ptr, SCAN_SEARCHPATH_VALUE ) ) <= 0 ) HTTPERROR_400_PARAM; | 283 | HTTPERROR_400_PARAM; /* PARSE ERROR */ |
| 284 | case -3: | ||
| 285 | scan_urlencoded_skipvalue(&read_ptr); | ||
| 286 | break; | ||
| 287 | case 1: /* matched "mode" */ | ||
| 288 | if ((mode = scan_find_keywords(keywords_mode, &read_ptr, SCAN_SEARCHPATH_VALUE)) <= 0) | ||
| 289 | HTTPERROR_400_PARAM; | ||
| 229 | break; | 290 | break; |
| 230 | case 2: /* matched "format" */ | 291 | case 2: /* matched "format" */ |
| 231 | if( ( format = scan_find_keywords( keywords_format, &read_ptr, SCAN_SEARCHPATH_VALUE ) ) <= 0 ) HTTPERROR_400_PARAM; | 292 | if ((format = scan_find_keywords(keywords_format, &read_ptr, SCAN_SEARCHPATH_VALUE)) <= 0) |
| 293 | HTTPERROR_400_PARAM; | ||
| 232 | break; | 294 | break; |
| 233 | case 3: HTTPERROR_400_PARAM; /* If the stats URL was mistakenly added as announce URL, return a 400 */ | 295 | case 3: |
| 296 | HTTPERROR_400_PARAM; /* If the stats URL was mistakenly added as announce URL, return a 400 */ | ||
| 234 | } | 297 | } |
| 235 | } | 298 | } |
| 236 | 299 | ||
| 237 | #ifdef WANT_FULLSCRAPE | 300 | #ifdef WANT_FULLSCRAPE |
| 238 | if( mode == TASK_FULLSCRAPE_TRACKERSTATE ) { | 301 | if (mode == TASK_FULLSCRAPE_TRACKERSTATE) { |
| 239 | format = mode; mode = TASK_STATS_TPB; | 302 | format = mode; |
| 303 | mode = TASK_STATS_TPB; | ||
| 240 | } | 304 | } |
| 241 | 305 | ||
| 242 | if( mode == TASK_STATS_TPB ) { | 306 | if (mode == TASK_STATS_TPB) { |
| 243 | struct http_data* cookie = io_getcookie( sock ); | 307 | struct http_data *cookie = io_getcookie(sock); |
| 244 | tai6464 t; | 308 | tai6464 t; |
| 245 | #ifdef WANT_COMPRESSION_GZIP | 309 | #ifdef WANT_COMPRESSION_GZIP |
| 246 | ws->request[ws->request_size] = 0; | 310 | ws->request[ws->request_size] = 0; |
| 247 | #ifdef WANT_COMPRESSION_GZIP_ALWAYS | 311 | #ifndef WANT_COMPRESSION_GZIP_ALWAYS |
| 248 | if( strstr( read_ptr - 1, "gzip" ) ) { | 312 | if (strstr(read_ptr - 1, "gzip")) { |
| 249 | #endif | 313 | #endif |
| 250 | cookie->flag |= STRUCT_HTTP_FLAG_GZIP; | 314 | cookie->flag |= STRUCT_HTTP_FLAG_GZIP; |
| 251 | format |= TASK_FLAG_GZIP; | 315 | format |= TASK_FLAG_GZIP; |
| 252 | #ifdef WANT_COMPRESSION_GZIP_ALWAYS | 316 | #ifndef WANT_COMPRESSION_GZIP_ALWAYS |
| 253 | } | 317 | } |
| 254 | #endif | 318 | #endif |
| 255 | #endif | 319 | #endif |
| 256 | /* Pass this task to the worker thread */ | 320 | /* Pass this task to the worker thread */ |
| 257 | cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK; | 321 | cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED; |
| 258 | 322 | ||
| 259 | /* Clients waiting for us should not easily timeout */ | 323 | /* Clients waiting for us should not easily timeout */ |
| 260 | taia_uint( &t, 0 ); io_timeout( sock, t ); | 324 | taia_uint(&t, 0); |
| 261 | fullscrape_deliver( sock, format ); | 325 | io_timeout(sock, t); |
| 262 | io_dontwantread( sock ); | 326 | fullscrape_deliver(sock, format); |
| 327 | io_dontwantread(sock); | ||
| 263 | return ws->reply_size = -2; | 328 | return ws->reply_size = -2; |
| 264 | } | 329 | } |
| 265 | #endif | 330 | #endif |
| 266 | 331 | ||
| 267 | /* default format for now */ | 332 | /* default format for now */ |
| 268 | if( ( mode & TASK_CLASS_MASK ) == TASK_STATS ) { | 333 | if ((mode & TASK_CLASS_MASK) == TASK_STATS) { |
| 269 | tai6464 t; | 334 | tai6464 t; |
| 270 | /* Complex stats also include expensive memory debugging tools */ | 335 | /* Complex stats also include expensive memory debugging tools */ |
| 271 | taia_uint( &t, 0 ); io_timeout( sock, t ); | 336 | taia_uint(&t, 0); |
| 272 | stats_deliver( sock, mode ); | 337 | io_timeout(sock, t); |
| 338 | stats_deliver(sock, mode); | ||
| 273 | return ws->reply_size = -2; | 339 | return ws->reply_size = -2; |
| 274 | } | 340 | } |
| 275 | 341 | ||
| 276 | /* Simple stats can be answered immediately */ | 342 | /* Simple stats can be answered immediately */ |

| 277 | return ws->reply_size = return_stats_for_tracker( ws->reply, mode, 0 ); | 343 | return ws->reply_size = return_stats_for_tracker(ws->reply, mode, 0); |
| 278 | } | 344 | } |
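By way of example (the /stats location is whatever g_stats_path is configured to, so the path here is only illustrative): GET /stats?mode=tpbs&format=txt matches the TASK_STATS_TPB branch above and is handed to the fullscrape worker via fullscrape_deliver(), while the remaining modes are either answered synchronously by return_stats_for_tracker() or queued for the stats worker with stats_deliver(), depending on their task class.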
| 279 | 345 | ||
| 280 | #ifdef WANT_MODEST_FULLSCRAPES | 346 | #ifdef WANT_MODEST_FULLSCRAPES |
| 281 | static pthread_mutex_t g_modest_fullscrape_mutex = PTHREAD_MUTEX_INITIALIZER; | 347 | static pthread_mutex_t g_modest_fullscrape_mutex = PTHREAD_MUTEX_INITIALIZER; |
| 282 | static ot_vector g_modest_fullscrape_timeouts; | 348 | static ot_vector g_modest_fullscrape_timeouts; |
| 283 | typedef struct { ot_ip6 ip; ot_time last_fullscrape; } ot_scrape_log; | 349 | typedef struct { |
| 350 | ot_ip6 ip; | ||
| 351 | ot_time last_fullscrape; | ||
| 352 | } ot_scrape_log; | ||
| 284 | #endif | 353 | #endif |
| 285 | 354 | ||
| 286 | #ifdef WANT_FULLSCRAPE | 355 | #ifdef WANT_FULLSCRAPE |
| 287 | static ssize_t http_handle_fullscrape( const int64 sock, struct ot_workstruct *ws ) { | 356 | static ssize_t http_handle_fullscrape(const int64 sock, struct ot_workstruct *ws) { |
| 288 | struct http_data* cookie = io_getcookie( sock ); | 357 | struct http_data *cookie = io_getcookie(sock); |
| 289 | int format = 0; | 358 | int format = 0; |
| 290 | tai6464 t; | 359 | tai6464 t; |
| 291 | 360 | ||
| 292 | #ifdef WANT_MODEST_FULLSCRAPES | 361 | #ifdef WANT_MODEST_FULLSCRAPES |
| 293 | { | 362 | { |
| 294 | ot_scrape_log this_peer, *new_peer; | 363 | ot_scrape_log this_peer, *new_peer; |
| 295 | int exactmatch; | 364 | int exactmatch; |
| 296 | memcpy( this_peer.ip, cookie->ip, sizeof(ot_ip6)); | 365 | memcpy(this_peer.ip, cookie->ip, sizeof(ot_ip6)); |
| 297 | this_peer.last_fullscrape = g_now_seconds; | 366 | this_peer.last_fullscrape = g_now_seconds; |
| 298 | pthread_mutex_lock(&g_modest_fullscrape_mutex); | 367 | pthread_mutex_lock(&g_modest_fullscrape_mutex); |
| 299 | new_peer = vector_find_or_insert( &g_modest_fullscrape_timeouts, &this_peer, sizeof(ot_scrape_log), sizeof(ot_ip6), &exactmatch ); | 368 | new_peer = vector_find_or_insert(&g_modest_fullscrape_timeouts, &this_peer, sizeof(ot_scrape_log), sizeof(ot_ip6), &exactmatch); |
| 300 | if( !new_peer ) { | 369 | if (!new_peer) { |
| 301 | pthread_mutex_unlock(&g_modest_fullscrape_mutex); | 370 | pthread_mutex_unlock(&g_modest_fullscrape_mutex); |
| 302 | HTTPERROR_500; | 371 | HTTPERROR_500; |
| 303 | } | 372 | } |
| 304 | if( exactmatch && ( this_peer.last_fullscrape - new_peer->last_fullscrape ) < OT_MODEST_PEER_TIMEOUT ) { | 373 | if (exactmatch && (this_peer.last_fullscrape - new_peer->last_fullscrape) < OT_MODEST_PEER_TIMEOUT) { |
| 305 | pthread_mutex_unlock(&g_modest_fullscrape_mutex); | 374 | pthread_mutex_unlock(&g_modest_fullscrape_mutex); |
| 306 | HTTPERROR_402_NOTMODEST; | 375 | HTTPERROR_402_NOTMODEST; |
| 307 | } | 376 | } |
| 308 | memcpy( new_peer, &this_peer, sizeof(ot_scrape_log)); | 377 | memcpy(new_peer, &this_peer, sizeof(ot_scrape_log)); |
| 309 | pthread_mutex_unlock(&g_modest_fullscrape_mutex); | 378 | pthread_mutex_unlock(&g_modest_fullscrape_mutex); |
| 310 | } | 379 | } |
| 311 | #endif | 380 | #endif |
| 312 | 381 | ||
| 382 | |||
| 383 | #if defined(WANT_COMPRESSION_GZIP) || defined(WANT_COMPRESSION_ZSTD) | ||
| 384 | ws->request[ws->request_size - 1] = 0; | ||
| 313 | #ifdef WANT_COMPRESSION_GZIP | 385 | #ifdef WANT_COMPRESSION_GZIP |
| 314 | ws->request[ws->request_size-1] = 0; | 386 | if (strstr(ws->request, "gzip")) { |
| 315 | if( strstr( ws->request, "gzip" ) ) { | ||
| 316 | cookie->flag |= STRUCT_HTTP_FLAG_GZIP; | 387 | cookie->flag |= STRUCT_HTTP_FLAG_GZIP; |
| 317 | format = TASK_FLAG_GZIP; | 388 | format |= TASK_FLAG_GZIP; |
| 318 | stats_issue_event( EVENT_FULLSCRAPE_REQUEST_GZIP, 0, (uintptr_t)cookie->ip ); | 389 | } |
| 319 | } else | 390 | #endif |
| 391 | #ifdef WANT_COMPRESSION_ZSTD | ||
| 392 | if (strstr(ws->request, "zstd")) { | ||
| 393 | cookie->flag |= STRUCT_HTTP_FLAG_ZSTD; | ||
| 394 | format |= TASK_FLAG_ZSTD; | ||
| 395 | } | ||
| 320 | #endif | 396 | #endif |
| 321 | stats_issue_event( EVENT_FULLSCRAPE_REQUEST, 0, (uintptr_t)cookie->ip ); | 397 | |
| 398 | #if defined(WANT_COMPRESSION_ZSTD) && defined(WANT_COMPRESSION_ZSTD_ALWAYS) | ||
| 399 | cookie->flag |= STRUCT_HTTP_FLAG_ZSTD; | ||
| 400 | format |= TASK_FLAG_ZSTD; | ||
| 401 | #endif | ||
| 402 | |||
| 403 | #if defined(WANT_COMPRESSION_GZIP) && defined(WANT_COMPRESSION_GZIP_ALWAYS) | ||
| 404 | cookie->flag |= STRUCT_HTTP_FLAG_GZIP; | ||
| 405 | format |= TASK_FLAG_GZIP; | ||
| 406 | #endif | ||
| 407 | #endif | ||
| 408 | |||
| 409 | stats_issue_event(EVENT_FULLSCRAPE_REQUEST, 0, (uintptr_t)cookie->ip); | ||
| 322 | 410 | ||
| 323 | #ifdef _DEBUG_HTTPERROR | 411 | #ifdef _DEBUG_HTTPERROR |
| 324 | fprintf( stderr, "%s", ws->debugbuf ); | 412 | fprintf(stderr, "%s", ws->debugbuf); |
| 325 | #endif | 413 | #endif |
| 326 | 414 | ||
| 327 | /* Pass this task to the worker thread */ | 415 | /* Pass this task to the worker thread */ |
| 328 | cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK; | 416 | cookie->flag |= STRUCT_HTTP_FLAG_WAITINGFORTASK | STRUCT_HTTP_FLAG_CHUNKED; |
| 329 | /* Clients waiting for us should not easily timeout */ | 417 | /* Clients waiting for us should not easily timeout */ |
| 330 | taia_uint( &t, 0 ); io_timeout( sock, t ); | 418 | taia_uint(&t, 0); |
| 331 | fullscrape_deliver( sock, TASK_FULLSCRAPE | format ); | 419 | io_timeout(sock, t); |
| 332 | io_dontwantread( sock ); | 420 | fullscrape_deliver(sock, TASK_FULLSCRAPE | format); |
| 421 | io_dontwantread(sock); | ||
| 333 | return ws->reply_size = -2; | 422 | return ws->reply_size = -2; |
| 334 | } | 423 | } |
| 335 | #endif | 424 | #endif |
| 336 | 425 | ||
| 337 | static ssize_t http_handle_scrape( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) { | 426 | static ssize_t http_handle_scrape(const int64 sock, struct ot_workstruct *ws, char *read_ptr) { |
| 338 | static const ot_keywords keywords_scrape[] = { { "info_hash", 1 }, { NULL, -3 } }; | 427 | static const ot_keywords keywords_scrape[] = {{"info_hash", 1}, {NULL, -3}}; |
| 339 | 428 | ||
| 340 | ot_hash * multiscrape_buf = (ot_hash*)ws->request; | 429 | ot_hash *multiscrape_buf = (ot_hash *)ws->request; |
| 341 | int scanon = 1, numwant = 0; | 430 | int scanon = 1, numwant = 0; |
| 342 | 431 | ||
| 343 | /* This is to hack around stupid clients that send "scrape ?info_hash" */ | 432 | /* This is to hack around stupid clients that send "scrape ?info_hash" */ |
| 344 | if( read_ptr[-1] != '?' ) { | 433 | if (read_ptr[-1] != '?') { |
| 345 | while( ( *read_ptr != '?' ) && ( *read_ptr != '\n' ) ) ++read_ptr; | 434 | while ((*read_ptr != '?') && (*read_ptr != '\n')) |
| 346 | if( *read_ptr == '\n' ) HTTPERROR_400_PARAM; | 435 | ++read_ptr; |
| 436 | if (*read_ptr == '\n') | ||
| 437 | HTTPERROR_400_PARAM; | ||
| 347 | ++read_ptr; | 438 | ++read_ptr; |
| 348 | } | 439 | } |
| 349 | 440 | ||
| 350 | while( scanon ) { | 441 | while (scanon) { |
| 351 | switch( scan_find_keywords( keywords_scrape, &read_ptr, SCAN_SEARCHPATH_PARAM ) ) { | 442 | switch (scan_find_keywords(keywords_scrape, &read_ptr, SCAN_SEARCHPATH_PARAM)) { |
| 352 | case -2: scanon = 0; break; /* TERMINATOR */ | 443 | case -2: |
| 353 | default: HTTPERROR_400_PARAM; /* PARSE ERROR */ | 444 | scanon = 0; |
| 354 | case -3: scan_urlencoded_skipvalue( &read_ptr ); break; | 445 | break; /* TERMINATOR */ |
| 355 | case 1: /* matched "info_hash" */ | 446 | default: |
| 447 | HTTPERROR_400_PARAM; /* PARSE ERROR */ | ||
| 448 | case -3: | ||
| 449 | scan_urlencoded_skipvalue(&read_ptr); | ||
| 450 | break; | ||
| 451 | case 1: /* matched "info_hash" */ | ||
| 356 | /* ignore this, when we have less than 20 bytes */ | 452 | /* ignore this, when we have less than 20 bytes */ |
| 357 | if( scan_urlencoded_query( &read_ptr, (char*)(multiscrape_buf + numwant++), SCAN_SEARCHPATH_VALUE ) != (ssize_t)sizeof(ot_hash) ) | 453 | if (scan_urlencoded_query(&read_ptr, (char *)(multiscrape_buf + numwant++), SCAN_SEARCHPATH_VALUE) != (ssize_t)sizeof(ot_hash)) |
| 358 | HTTPERROR_400_PARAM; | 454 | HTTPERROR_400_PARAM; |
| 359 | break; | 455 | break; |
| 360 | } | 456 | } |
| 361 | } | 457 | } |
| 362 | 458 | ||
| 363 | /* No info_hash found? Inform user */ | 459 | /* No info_hash found? Inform user */ |
| 364 | if( !numwant ) HTTPERROR_400_PARAM; | 460 | if (!numwant) |
| 461 | HTTPERROR_400_PARAM; | ||
| 365 | 462 | ||
| 366 | /* Limit number of hashes to process */ | 463 | /* Limit number of hashes to process */ |
| 367 | if( numwant > OT_MAXMULTISCRAPE_COUNT ) | 464 | if (numwant > OT_MAXMULTISCRAPE_COUNT) |
| 368 | numwant = OT_MAXMULTISCRAPE_COUNT; | 465 | numwant = OT_MAXMULTISCRAPE_COUNT; |
| 369 | 466 | ||
| 370 | /* Enough for http header + whole scrape string */ | 467 | /* Enough for http header + whole scrape string */ |
| 371 | ws->reply_size = return_tcp_scrape_for_torrent( multiscrape_buf, numwant, ws->reply ); | 468 | ws->reply_size = return_tcp_scrape_for_torrent((const ot_hash *)multiscrape_buf, numwant, ws->reply); |
| 372 | stats_issue_event( EVENT_SCRAPE, FLAG_TCP, ws->reply_size ); | 469 | stats_issue_event(EVENT_SCRAPE, FLAG_TCP, ws->reply_size); |
| 373 | return ws->reply_size; | 470 | return ws->reply_size; |
| 374 | } | 471 | } |
| 375 | 472 | ||
| 376 | #ifdef WANT_LOG_NUMWANT | 473 | #ifdef WANT_LOG_NUMWANT |
| 377 | unsigned long long numwants[201]; | 474 | unsigned long long numwants[201]; |
| 378 | #endif | 475 | #endif |
| 379 | 476 | ||
| 380 | #if defined( WANT_KEEPALIVE ) || defined( WANT_IP_FROM_PROXY ) | 477 | #if defined(WANT_KEEPALIVE) || defined(WANT_IP_FROM_PROXY) |
| 381 | static char* http_header( char *data, size_t byte_count, char *header ) { | 478 | static char *http_header(char *data, size_t byte_count, char *header) { |
| 382 | size_t i; | 479 | size_t i; |
| 383 | long sl = strlen( header ); | 480 | long sl = strlen(header); |
| 384 | for( i = 0; i + sl + 2 < byte_count; ++i ) { | 481 | for (i = 0; i + sl + 2 < byte_count; ++i) { |
| 385 | if( data[i] != '\n' || data[ i + sl + 1] != ':' ) continue; | 482 | if (data[i] != '\n' || data[i + sl + 1] != ':') |
| 386 | if( !case_equalb( data + i + 1, sl, header ) ) continue; | 483 | continue; |
| 484 | if (!case_equalb(data + i + 1, sl, header)) | ||
| 485 | continue; | ||
| 387 | data += i + sl + 2; | 486 | data += i + sl + 2; |
| 388 | while( *data == ' ' || *data == '\t' ) ++data; | 487 | while (*data == ' ' || *data == '\t') |
| 488 | ++data; | ||
| 389 | return data; | 489 | return data; |
| 390 | } | 490 | } |
| 391 | return 0; | 491 | return 0; |
| 392 | } | 492 | } |
| 393 | #endif | 493 | #endif |
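A hypothetical usage sketch for the helper above, to make its contract explicit: it returns a pointer into the raw request buffer (not NUL-terminated, the value runs to the end of its header line) or 0 when the header is absent, and the header name is passed in lower case without the colon. The user-agent lookup is purely illustrative; opentracker itself only asks for "x-forwarded-for" and "connection". It also assumes the request buffer is NUL-terminated, as the handlers above arrange before calling strstr():

    char  *value = http_header(ws->request, ws->header_size, "user-agent");
    if (value) {
      size_t len = strcspn(value, "\r\n");  /* value ends at that line's CR/LF */
      fprintf(stderr, "client sent: %.*s\n", (int)len, value);
    }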
| 394 | 494 | ||
| 395 | static ot_keywords keywords_announce[] = { { "port", 1 }, { "left", 2 }, { "event", 3 }, { "numwant", 4 }, { "compact", 5 }, { "compact6", 5 }, { "info_hash", 6 }, | 495 | static ot_keywords keywords_announce[] = {{"port", 1}, {"left", 2}, {"event", 3}, {"numwant", 4}, {"compact", 5}, {"compact6", 5}, {"info_hash", 6}, |
| 396 | #ifdef WANT_IP_FROM_QUERY_STRING | 496 | #ifdef WANT_IP_FROM_QUERY_STRING |
| 397 | { "ip", 7 }, | 497 | {"ip", 7}, |
| 398 | #endif | 498 | #endif |
| 399 | #ifdef WANT_FULLLOG_NETWORKS | 499 | #ifdef WANT_FULLLOG_NETWORKS |
| 400 | { "lognet", 8 }, | 500 | {"lognet", 8}, |
| 401 | #endif | 501 | #endif |
| 402 | { "peer_id", 9 }, | 502 | {"peer_id", 9}, {NULL, -3}}; |
| 403 | { NULL, -3 } }; | 503 | static ot_keywords keywords_announce_event[] = {{"completed", 1}, {"stopped", 2}, {NULL, -3}}; |
| 404 | static ot_keywords keywords_announce_event[] = { { "completed", 1 }, { "stopped", 2 }, { NULL, -3 } }; | 504 | static ssize_t http_handle_announce(const int64 sock, struct ot_workstruct *ws, char *read_ptr) { |
| 405 | static ssize_t http_handle_announce( const int64 sock, struct ot_workstruct *ws, char *read_ptr ) { | ||
| 406 | int numwant, tmp, scanon; | 505 | int numwant, tmp, scanon; |
| 407 | unsigned short port = 0; | 506 | unsigned short port = 0; |
| 408 | char *write_ptr; | 507 | char *write_ptr; |
| 409 | ssize_t len; | 508 | ssize_t len; |
| 410 | struct http_data *cookie = io_getcookie( sock ); | 509 | struct http_data *cookie = io_getcookie(sock); |
| 411 | 510 | ||
| 412 | /* This is to hack around stupid clients that send "announce ?info_hash" */ | 511 | /* This is to hack around stupid clients that send "announce ?info_hash" */ |
| 413 | if( read_ptr[-1] != '?' ) { | 512 | if (read_ptr[-1] != '?') { |
| 414 | while( ( *read_ptr != '?' ) && ( *read_ptr != '\n' ) ) ++read_ptr; | 513 | while ((*read_ptr != '?') && (*read_ptr != '\n')) |
| 415 | if( *read_ptr == '\n' ) HTTPERROR_400_PARAM; | 514 | ++read_ptr; |
| 515 | if (*read_ptr == '\n') | ||
| 516 | HTTPERROR_400_PARAM; | ||
| 416 | ++read_ptr; | 517 | ++read_ptr; |
| 417 | } | 518 | } |
| 418 | 519 | ||
| 419 | #ifdef WANT_IP_FROM_PROXY | 520 | #ifdef WANT_IP_FROM_PROXY |
| 420 | if( accesslist_is_blessed( cookie->ip, OT_PERMISSION_MAY_PROXY ) ) { | 521 | if (accesslist_is_blessed(cookie->ip, OT_PERMISSION_MAY_PROXY)) { |
| 421 | ot_ip6 proxied_ip; | 522 | ot_ip6 proxied_ip; |
| 422 | char *fwd = http_header( ws->request, ws->header_size, "x-forwarded-for" ); | 523 | char *fwd = http_header(ws->request, ws->header_size, "x-forwarded-for"); |
| 423 | if( fwd && scan_ip6( fwd, proxied_ip ) ) | 524 | if (fwd && scan_ip6(fwd, proxied_ip)) { |
| 424 | OT_SETIP( &ws->peer, proxied_ip ); | 525 | OT_SETIP(ws->peer, proxied_ip); |
| 425 | else | 526 | } else |
| 426 | OT_SETIP( &ws->peer, cookie->ip ); | 527 | OT_SETIP(ws->peer, cookie->ip); |
| 427 | } else | 528 | } else |
| 428 | #endif | 529 | #endif |
| 429 | OT_SETIP( &ws->peer, cookie->ip ); | 530 | OT_SETIP(ws->peer, cookie->ip); |
| 430 | 531 | ||
| 431 | ws->peer_id = NULL; | 532 | ws->peer_id = NULL; |
| 432 | ws->hash = NULL; | 533 | ws->hash = NULL; |
| 433 | 534 | ||
| 434 | OT_SETPORT( &ws->peer, &port ); | 535 | OT_SETPORT(ws->peer, &port); |
| 435 | OT_PEERFLAG( &ws->peer ) = 0; | 536 | OT_PEERFLAG(ws->peer) = 0; |
| 436 | numwant = 50; | 537 | numwant = 50; |
| 437 | scanon = 1; | 538 | scanon = 1; |
| 438 | 539 | ||
| 439 | while( scanon ) { | 540 | while (scanon) { |
| 440 | switch( scan_find_keywords(keywords_announce, &read_ptr, SCAN_SEARCHPATH_PARAM ) ) { | 541 | switch (scan_find_keywords(keywords_announce, &read_ptr, SCAN_SEARCHPATH_PARAM)) { |
| 441 | case -2: scanon = 0; break; /* TERMINATOR */ | 542 | case -2: |
| 442 | case -1: HTTPERROR_400_PARAM; /* PARSE ERROR */ | 543 | scanon = 0; |
| 443 | case -3: scan_urlencoded_skipvalue( &read_ptr ); break; | 544 | break; /* TERMINATOR */ |
| 545 | case -1: | ||
| 546 | HTTPERROR_400_PARAM; /* PARSE ERROR */ | ||
| 547 | case -3: | ||
| 548 | scan_urlencoded_skipvalue(&read_ptr); | ||
| 549 | break; | ||
| 444 | case 1: /* matched "port" */ | 550 | case 1: /* matched "port" */ |
| 445 | len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ); | 551 | len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE); |
| 446 | if( ( len <= 0 ) || scan_fixed_int( write_ptr, len, &tmp ) || ( tmp > 0xffff ) ) HTTPERROR_400_PARAM; | 552 | if ((len <= 0) || scan_fixed_int(write_ptr, len, &tmp) || (tmp > 0xffff)) |
| 447 | port = htons( tmp ); OT_SETPORT( &ws->peer, &port ); | 553 | HTTPERROR_400_PARAM; |
| 554 | port = htons(tmp); | ||
| 555 | OT_SETPORT(&ws->peer, &port); | ||
| 448 | break; | 556 | break; |
| 449 | case 2: /* matched "left" */ | 557 | case 2: /* matched "left" */ |
| 450 | if( ( len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ) ) <= 0 ) HTTPERROR_400_PARAM; | 558 | if ((len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE)) <= 0) |
| 451 | if( scan_fixed_int( write_ptr, len, &tmp ) ) tmp = 0; | 559 | HTTPERROR_400_PARAM; |
| 452 | if( !tmp ) OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_SEEDING; | 560 | if (scan_fixed_int(write_ptr, len, &tmp)) |
| 561 | tmp = 0; | ||
| 562 | if (!tmp) | ||
| 563 | OT_PEERFLAG(&ws->peer) |= PEER_FLAG_SEEDING; | ||
| 453 | break; | 564 | break; |
| 454 | case 3: /* matched "event" */ | 565 | case 3: /* matched "event" */ |
| 455 | switch( scan_find_keywords( keywords_announce_event, &read_ptr, SCAN_SEARCHPATH_VALUE ) ) { | 566 | switch (scan_find_keywords(keywords_announce_event, &read_ptr, SCAN_SEARCHPATH_VALUE)) { |
| 456 | case -1: HTTPERROR_400_PARAM; | 567 | case -1: |
| 457 | case 1: /* matched "completed" */ | 568 | HTTPERROR_400_PARAM; |
| 458 | OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_COMPLETED; | 569 | case 1: /* matched "completed" */ |
| 459 | break; | 570 | OT_PEERFLAG(&ws->peer) |= PEER_FLAG_COMPLETED; |
| 460 | case 2: /* matched "stopped" */ | 571 | break; |
| 461 | OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_STOPPED; | 572 | case 2: /* matched "stopped" */ |
| 462 | break; | 573 | OT_PEERFLAG(&ws->peer) |= PEER_FLAG_STOPPED; |
| 463 | default: | 574 | break; |
| 464 | break; | 575 | default: |
| 576 | break; | ||
| 465 | } | 577 | } |
| 466 | break; | 578 | break; |
| 467 | case 4: /* matched "numwant" */ | 579 | case 4: /* matched "numwant" */ |
| 468 | len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ); | 580 | len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE); |
| 469 | if( ( len <= 0 ) || scan_fixed_int( write_ptr, len, &numwant ) ) HTTPERROR_400_PARAM; | 581 | if ((len <= 0) || scan_fixed_int(write_ptr, len, &numwant)) |
| 470 | if( numwant < 0 ) numwant = 50; | 582 | HTTPERROR_400_PARAM; |
| 471 | if( numwant > 200 ) numwant = 200; | 583 | if (numwant < 0) |
| 584 | numwant = 50; | ||
| 585 | if (numwant > 200) | ||
| 586 | numwant = 200; | ||
| 472 | break; | 587 | break; |
| 473 | case 5: /* matched "compact" */ | 588 | case 5: /* matched "compact" */ |
| 474 | len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ); | 589 | len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE); |
| 475 | if( ( len <= 0 ) || scan_fixed_int( write_ptr, len, &tmp ) ) HTTPERROR_400_PARAM; | 590 | if ((len <= 0) || scan_fixed_int(write_ptr, len, &tmp)) |
| 476 | if( !tmp ) HTTPERROR_400_COMPACT; | 591 | HTTPERROR_400_PARAM; |
| 592 | if (!tmp) | ||
| 593 | HTTPERROR_400_COMPACT; | ||
| 477 | break; | 594 | break; |
| 478 | case 6: /* matched "info_hash" */ | 595 | case 6: /* matched "info_hash" */ |
| 479 | if( ws->hash ) HTTPERROR_400_DOUBLEHASH; | 596 | if (ws->hash) |
| 597 | HTTPERROR_400_DOUBLEHASH; | ||
| 480 | /* ignore this, when we have less than 20 bytes */ | 598 | /* ignore this, when we have less than 20 bytes */ |
| 481 | if( scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ) != 20 ) HTTPERROR_400_PARAM; | 599 | if (scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE) != 20) |
| 482 | ws->hash = (ot_hash*)write_ptr; | 600 | HTTPERROR_400_PARAM; |
| 601 | ws->hash = (ot_hash *)write_ptr; | ||
| 483 | break; | 602 | break; |
| 484 | #ifdef WANT_IP_FROM_QUERY_STRING | 603 | #ifdef WANT_IP_FROM_QUERY_STRING |
| 485 | case 7: /* matched "ip" */ | 604 | case 7: /* matched "ip" */ |
| 486 | { | 605 | { |
| 487 | char *tmp_buf1 = ws->reply, *tmp_buf2 = ws->reply+16; | 606 | char *tmp_buf1 = ws->reply, *tmp_buf2 = ws->reply + 16; |
| 488 | len = scan_urlencoded_query( &read_ptr, tmp_buf2, SCAN_SEARCHPATH_VALUE ); | 607 | len = scan_urlencoded_query(&read_ptr, tmp_buf2, SCAN_SEARCHPATH_VALUE); |
| 489 | tmp_buf2[len] = 0; | 608 | tmp_buf2[len] = 0; |
| 490 | if( ( len <= 0 ) || !scan_ip6( tmp_buf2, tmp_buf1 ) ) HTTPERROR_400_PARAM; | 609 | if ((len <= 0) || !scan_ip6(tmp_buf2, tmp_buf1)) |
| 491 | OT_SETIP( &ws->peer, tmp_buf1 ); | 610 | HTTPERROR_400_PARAM; |
| 492 | } | 611 | OT_SETIP(&ws->peer, tmp_buf1); |
| 493 | break; | 612 | } break; |
| 494 | #endif | 613 | #endif |
| 495 | #ifdef WANT_FULLLOG_NETWORKS | 614 | #ifdef WANT_FULLLOG_NETWORKS |
| 496 | case 8: /* matched "lognet" */ | 615 | case 8: /* matched "lognet" */ |
| 497 | { | 616 | { |
| 498 | //if( accesslist_is_blessed( cookie->ip, OT_PERMISSION_MAY_STAT ) ) { | 617 | // if( accesslist_is_blessed( cookie->ip, OT_PERMISSION_MAY_STAT ) ) { |
| 499 | char *tmp_buf = ws->reply; | 618 | char *tmp_buf = ws->reply; |
| 500 | ot_net net; | 619 | ot_net net; |
| 501 | signed short parsed, bits; | 620 | signed short parsed, bits; |
| 502 | 621 | ||
| 503 | len = scan_urlencoded_query( &read_ptr, tmp_buf, SCAN_SEARCHPATH_VALUE ); | 622 | len = scan_urlencoded_query(&read_ptr, tmp_buf, SCAN_SEARCHPATH_VALUE); |
| 504 | tmp_buf[len] = 0; | 623 | tmp_buf[len] = 0; |
| 505 | if( len <= 0 ) HTTPERROR_400_PARAM; | 624 | if (len <= 0) |
| 506 | if( *tmp_buf == '-' ) { | 625 | HTTPERROR_400_PARAM; |
| 507 | loglist_reset( ); | 626 | if (*tmp_buf == '-') { |
| 508 | return ws->reply_size = sprintf( ws->reply, "Successfully removed.\n" ); | 627 | loglist_reset(); |
| 509 | } | 628 | return ws->reply_size = sprintf(ws->reply, "Successfully removed.\n"); |
| 510 | parsed = scan_ip6( tmp_buf, net.address ); | ||
| 511 | if( !parsed ) HTTPERROR_400_PARAM; | ||
| 512 | if( tmp_buf[parsed++] != '/' ) | ||
| 513 | bits = 128; | ||
| 514 | else { | ||
| 515 | parsed = scan_short( tmp_buf + parsed, &bits ); | ||
| 516 | if( !parsed ) HTTPERROR_400_PARAM; | ||
| 517 | if( ip6_isv4mapped( net.address ) ) | ||
| 518 | bits += 96; | ||
| 519 | } | ||
| 520 | net.bits = bits; | ||
| 521 | loglist_add_network( &net ); | ||
| 522 | return ws->reply_size = sprintf( ws->reply, "Successfully added.\n" ); | ||
| 523 | //} | ||
| 524 | } | 629 | } |
| 525 | break; | 630 | parsed = scan_ip6(tmp_buf, net.address); |
| 631 | if (!parsed) | ||
| 632 | HTTPERROR_400_PARAM; | ||
| 633 | if (tmp_buf[parsed++] != '/') | ||
| 634 | bits = 128; | ||
| 635 | else { | ||
| 636 | parsed = scan_short(tmp_buf + parsed, &bits); | ||
| 637 | if (!parsed) | ||
| 638 | HTTPERROR_400_PARAM; | ||
| 639 | if (ip6_isv4mapped(net.address)) | ||
| 640 | bits += 96; | ||
| 641 | } | ||
| 642 | net.bits = bits; | ||
| 643 | loglist_add_network(&net); | ||
| 644 | return ws->reply_size = sprintf(ws->reply, "Successfully added.\n"); | ||
| 645 | //} | ||
| 646 | } break; | ||
| 526 | #endif | 647 | #endif |
| 527 | case 9: /* matched "peer_id" */ | 648 | case 9: /* matched "peer_id" */ |
| 528 | /* ignore this, when we have less than 20 bytes */ | 649 | /* ignore this, when we have less than 20 bytes */ |
| 529 | if( scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE ) != 20 ) HTTPERROR_400_PARAM; | 650 | if (scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_SEARCHPATH_VALUE) != 20) |
| 530 | ws->peer_id = write_ptr; | 651 | HTTPERROR_400_PARAM; |
| 531 | break; | 652 | ws->peer_id = write_ptr; |
| 653 | break; | ||
| 532 | } | 654 | } |
| 533 | } | 655 | } |
| 534 | 656 | ||
| @@ -541,100 +663,107 @@ static ssize_t http_handle_announce( const int64 sock, struct ot_workstruct *ws, | |||
| 541 | */ | 663 | */ |
| 542 | 664 | ||
| 543 | /* Scanned whole query string */ | 665 | /* Scanned whole query string */ |
| 544 | if( !ws->hash ) | 666 | if (!ws->hash) |
| 545 | return ws->reply_size = sprintf( ws->reply, "d14:failure reason80:Your client forgot to send your torrent's info_hash. Please upgrade your client.e" ); | 667 | return ws->reply_size = sprintf(ws->reply, "d14:failure reason80:Your client forgot to send your torrent's info_hash. Please upgrade your client.e"); |
| 546 | 668 | ||
| 547 | if( OT_PEERFLAG( &ws->peer ) & PEER_FLAG_STOPPED ) | 669 | if (OT_PEERFLAG(&ws->peer) & PEER_FLAG_STOPPED) |
| 548 | ws->reply_size = remove_peer_from_torrent( FLAG_TCP, ws ); | 670 | ws->reply_size = remove_peer_from_torrent(FLAG_TCP, ws); |
| 549 | else | 671 | else |
| 550 | ws->reply_size = add_peer_to_torrent_and_return_peers( FLAG_TCP, ws, numwant ); | 672 | ws->reply_size = add_peer_to_torrent_and_return_peers(FLAG_TCP, ws, numwant); |
| 551 | 673 | ||
| 552 | stats_issue_event( EVENT_ANNOUNCE, FLAG_TCP, ws->reply_size); | 674 | stats_issue_event(EVENT_ANNOUNCE, FLAG_TCP, ws->reply_size); |
| 553 | return ws->reply_size; | 675 | return ws->reply_size; |
| 554 | } | 676 | } |
| 555 | 677 | ||
| 556 | ssize_t http_handle_request( const int64 sock, struct ot_workstruct *ws ) { | 678 | ssize_t http_handle_request(const int64 sock, struct ot_workstruct *ws) { |
| 557 | ssize_t reply_off, len; | 679 | ssize_t reply_off, len; |
| 558 | char *read_ptr = ws->request, *write_ptr; | 680 | char *read_ptr = ws->request, *write_ptr; |
| 559 | 681 | ||
| 560 | #ifdef WANT_FULLLOG_NETWORKS | 682 | #ifdef WANT_FULLLOG_NETWORKS |
| 561 | struct http_data *cookie = io_getcookie( sock ); | 683 | struct http_data *cookie = io_getcookie(sock); |
| 562 | if( loglist_check_address( cookie->ip ) ) { | 684 | if (loglist_check_address(cookie->ip)) { |
| 563 | ot_log *log = malloc( sizeof( ot_log ) ); | 685 | ot_log *log = malloc(sizeof(ot_log)); |
| 564 | if( log ) { | 686 | if (log) { |
| 565 | log->size = ws->request_size; | 687 | log->size = ws->request_size; |
| 566 | log->data = malloc( ws->request_size ); | 688 | log->data = malloc(ws->request_size); |
| 567 | log->next = 0; | 689 | log->next = 0; |
| 568 | log->time = g_now_seconds; | 690 | log->time = g_now_seconds; |
| 569 | memcpy( log->ip, cookie->ip, sizeof(ot_ip6)); | 691 | memcpy(log->ip, cookie->ip, sizeof(ot_ip6)); |
| 570 | if( log->data ) { | 692 | if (log->data) { |
| 571 | memcpy( log->data, ws->request, ws->request_size ); | 693 | memcpy(log->data, ws->request, ws->request_size); |
| 572 | if( !g_logchain_first ) | 694 | if (!g_logchain_first) |
| 573 | g_logchain_first = g_logchain_last = log; | 695 | g_logchain_first = g_logchain_last = log; |
| 574 | else { | 696 | else { |
| 575 | g_logchain_last->next = log; | 697 | g_logchain_last->next = log; |
| 576 | g_logchain_last = log; | 698 | g_logchain_last = log; |
| 577 | } | 699 | } |
| 578 | } else | 700 | } else |
| 579 | free( log ); | 701 | free(log); |
| 580 | } | 702 | } |
| 581 | } | 703 | } |
| 582 | #endif | 704 | #endif |
| 583 | 705 | ||
| 584 | #ifdef _DEBUG_HTTPERROR | 706 | #ifdef _DEBUG_HTTPERROR |
| 585 | reply_off = ws->request_size; | 707 | reply_off = ws->request_size; |
| 586 | if( ws->request_size >= G_DEBUGBUF_SIZE ) | 708 | if (ws->request_size >= G_DEBUGBUF_SIZE) |
| 587 | reply_off = G_DEBUGBUF_SIZE - 1; | 709 | reply_off = G_DEBUGBUF_SIZE - 1; |
| 588 | memcpy( ws->debugbuf, ws->request, reply_off ); | 710 | memcpy(ws->debugbuf, ws->request, reply_off); |
| 589 | ws->debugbuf[ reply_off ] = 0; | 711 | ws->debugbuf[reply_off] = 0; |
| 590 | #endif | 712 | #endif |
| 591 | 713 | ||
| 592 | /* Tell subroutines where to put reply data */ | 714 | /* Tell subroutines where to put reply data */ |
| 593 | ws->reply = ws->outbuf + SUCCESS_HTTP_HEADER_LENGTH; | 715 | ws->reply = ws->outbuf + SUCCESS_HTTP_HEADER_LENGTH; |
| 594 | 716 | ||
| 595 | /* This one implicitly tests strlen < 5, too -- remember, it is \n terminated */ | 717 | /* This one implicitly tests strlen < 5, too -- remember, it is \n terminated */ |
| 596 | if( memcmp( read_ptr, "GET /", 5) ) HTTPERROR_400; | 718 | if (memcmp(read_ptr, "GET /", 5)) |
| 719 | HTTPERROR_400; | ||
| 597 | 720 | ||
| 598 | /* Skip leading '/' */ | 721 | /* Skip leading '/' */ |
| 599 | for( read_ptr+=4; *read_ptr == '/'; ++read_ptr); | 722 | for (read_ptr += 4; *read_ptr == '/'; ++read_ptr) |
| 723 | ; | ||
| 600 | 724 | ||
| 601 | /* Try to parse the request. | 725 | /* Try to parse the request. |
| 602 | In reality we abandoned requiring the url to be correct. This now | 726 | In reality we abandoned requiring the url to be correct. This now |
| 603 | only decodes url encoded characters; we check for announces and | 727 | only decodes url encoded characters; we check for announces and |
| 604 | scrapes by looking for "a*" or "sc" */ | 728 | scrapes by looking for "a*" or "sc" */ |
| 605 | len = scan_urlencoded_query( &read_ptr, write_ptr = read_ptr, SCAN_PATH ); | 729 | len = scan_urlencoded_query(&read_ptr, write_ptr = read_ptr, SCAN_PATH); |
| 606 | 730 | ||
| 607 | /* If parsing returned an error, leave with not found */ | 731 | /* If parsing returned an error, leave with not found */ |
| 608 | if( g_redirecturl && ( len == -2 ) ) HTTPERROR_302; | 732 | if (g_redirecturl && (len == -2)) |
| 609 | if( len <= 0 ) HTTPERROR_404; | 733 | HTTPERROR_302; |
| 734 | if (len <= 0) | ||
| 735 | HTTPERROR_404; | ||
| 610 | 736 | ||
| 611 | /* This is the hardcore match for announce*/ | 737 | /* This is the hardcore match for announce*/ |
| 612 | if( ( *write_ptr == 'a' ) || ( *write_ptr == '?' ) ) | 738 | if ((*write_ptr == 'a') || (*write_ptr == '?')) |
| 613 | http_handle_announce( sock, ws, read_ptr ); | 739 | http_handle_announce(sock, ws, read_ptr); |
| 614 | #ifdef WANT_FULLSCRAPE | 740 | #ifdef WANT_FULLSCRAPE |
| 615 | else if( !memcmp( write_ptr, "scrape HTTP/", 12 ) ) | 741 | else if (!memcmp(write_ptr, "scrape HTTP/", 12)) |
| 616 | http_handle_fullscrape( sock, ws ); | 742 | http_handle_fullscrape(sock, ws); |
| 617 | #endif | 743 | #endif |
| 618 | /* This is the hardcore match for scrape */ | 744 | /* This is the hardcore match for scrape */ |
| 619 | else if( !memcmp( write_ptr, "sc", 2 ) ) | 745 | else if (!memcmp(write_ptr, "sc", 2)) |
| 620 | http_handle_scrape( sock, ws, read_ptr ); | 746 | http_handle_scrape(sock, ws, read_ptr); |
| 621 | /* All the rest is matched the standard way */ | 747 | /* All the rest is matched the standard way */ |
| 622 | else if( len == g_stats_path_len && !memcmp( write_ptr, g_stats_path, len ) ) | 748 | else if (len == g_stats_path_len && !memcmp(write_ptr, g_stats_path, len)) |
| 623 | http_handle_stats( sock, ws, read_ptr ); | 749 | http_handle_stats(sock, ws, read_ptr); |
| 624 | else | 750 | else |
| 625 | HTTPERROR_404; | 751 | HTTPERROR_404; |
| 626 | 752 | ||
| 627 | /* Find out if the client wants to keep this connection alive */ | 753 | /* Find out if the client wants to keep this connection alive */ |
| 628 | ws->keep_alive = 0; | 754 | ws->keep_alive = 0; |
| 629 | #ifdef WANT_KEEPALIVE | 755 | #ifdef WANT_KEEPALIVE |
| 630 | read_ptr=http_header( ws->request, ws->header_size, "connection"); | 756 | read_ptr = http_header(ws->request, ws->header_size, "connection"); |
| 631 | if( read_ptr && ( *read_ptr == 'K' || *read_ptr == 'k' ) ) ws->keep_alive = 1; | 757 | if (read_ptr && (*read_ptr == 'K' || *read_ptr == 'k')) |
| 758 | ws->keep_alive = 1; | ||
| 632 | #endif | 759 | #endif |
| 633 | 760 | ||
| 634 | /* If routines handled sending themselves, just return */ | 761 | /* If routines handled sending themselves, just return */ |
| 635 | if( ws->reply_size == -2 ) return 0; | 762 | if (ws->reply_size == -2) |
| 763 | return 0; | ||
| 636 | /* If routine failed, let http error take over */ | 764 | /* If routine failed, let http error take over */ |
| 637 | if( ws->reply_size <= 0 ) HTTPERROR_500; | 765 | if (ws->reply_size <= 0) |
| 766 | HTTPERROR_500; | ||
| 638 | 767 | ||
| 639 | /* This one is rather ugly, so I take you step by step through it. | 768 | /* This one is rather ugly, so I take you step by step through it. |
| 640 | 769 | ||
| @@ -643,18 +772,16 @@ ssize_t http_handle_request( const int64 sock, struct ot_workstruct *ws ) { | |||
| 643 | plus dynamic space needed to expand our Content-Length value. We reserve SUCCESS_HTTP_SIZE_OFF for its expansion and calculate | 772 | plus dynamic space needed to expand our Content-Length value. We reserve SUCCESS_HTTP_SIZE_OFF for its expansion and calculate |
| 644 | the space NOT needed to expand in reply_off | 773 | the space NOT needed to expand in reply_off |
| 645 | */ | 774 | */ |
| 646 | reply_off = SUCCESS_HTTP_SIZE_OFF - snprintf( ws->outbuf, 0, "%zd", ws->reply_size ); | 775 | reply_off = SUCCESS_HTTP_SIZE_OFF - snprintf(ws->outbuf, 0, "%zd", ws->reply_size); |
| 647 | ws->reply = ws->outbuf + reply_off; | 776 | ws->reply = ws->outbuf + reply_off; |
| 648 | 777 | ||
| 649 | /* 2. Now we sprintf our header so that sprintf writes its terminating '\0' exactly one byte before content starts. Complete | 778 | /* 2. Now we sprintf our header so that sprintf writes its terminating '\0' exactly one byte before content starts. Complete |
| 650 | packet size is increased by size of header plus one byte '\n', we will copy over '\0' in next step */ | 779 | packet size is increased by size of header plus one byte '\n', we will copy over '\0' in next step */ |
| 651 | ws->reply_size += 1 + sprintf( ws->reply, "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r", ws->reply_size ); | 780 | ws->reply_size += 1 + sprintf(ws->reply, "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: %zd\r\n\r", ws->reply_size); |
| 652 | 781 | ||
| 653 | /* 3. Finally we join both blocks neatly */ | 782 | /* 3. Finally we join both blocks neatly */ |
| 654 | ws->outbuf[ SUCCESS_HTTP_HEADER_LENGTH - 1 ] = '\n'; | 783 | ws->outbuf[SUCCESS_HTTP_HEADER_LENGTH - 1] = '\n'; |
| 655 | 784 | ||
| 656 | http_senddata( sock, ws ); | 785 | http_senddata(sock, ws); |
| 657 | return ws->reply_size; | 786 | return ws->reply_size; |
| 658 | } | 787 | } |
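To make the three-step splice above concrete, a worked example using the exact header string from step 2: for a 1234-byte bencoded body written at outbuf + SUCCESS_HTTP_HEADER_LENGTH (offset 80), snprintf(ws->outbuf, 0, "%zd", 1234) reports 4 digits, so reply_off = 17 - 4 = 13. The header "HTTP/1.1 200 OK\r\nContent-Type: text/plain\r\nContent-Length: 1234\r\n\r" is 66 bytes, so sprintf()'s terminating '\0' lands at outbuf[13 + 66] = outbuf[79] = outbuf[SUCCESS_HTTP_HEADER_LENGTH - 1]. Step 3 overwrites that '\0' with '\n', the header therefore ends in "\r\n\r\n" flush against the body, and the reply sent from outbuf + 13 is 67 + 1234 bytes long.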
| 659 | |||
| 660 | const char *g_version_http_c = "$Source$: $Revision$\n"; | ||
| @@ -7,9 +7,12 @@ | |||
| 7 | #define OT_HTTP_H__ | 7 | #define OT_HTTP_H__ |
| 8 | 8 | ||
| 9 | typedef enum { | 9 | typedef enum { |
| 10 | STRUCT_HTTP_FLAG_WAITINGFORTASK = 1, | 10 | STRUCT_HTTP_FLAG_WAITINGFORTASK = 1, |
| 11 | STRUCT_HTTP_FLAG_GZIP = 2, | 11 | STRUCT_HTTP_FLAG_GZIP = 2, |
| 12 | STRUCT_HTTP_FLAG_BZIP2 = 4 | 12 | STRUCT_HTTP_FLAG_BZIP2 = 4, |
| 13 | STRUCT_HTTP_FLAG_ZSTD = 8, | ||
| 14 | STRUCT_HTTP_FLAG_CHUNKED = 16, | ||
| 15 | STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER = 32 | ||
| 13 | } STRUCT_HTTP_FLAG; | 16 | } STRUCT_HTTP_FLAG; |
| 14 | 17 | ||
| 15 | struct http_data { | 18 | struct http_data { |
| @@ -20,9 +23,9 @@ struct http_data { | |||
| 20 | STRUCT_HTTP_FLAG flag; | 23 | STRUCT_HTTP_FLAG flag; |
| 21 | }; | 24 | }; |
| 22 | 25 | ||
| 23 | ssize_t http_handle_request( const int64 s, struct ot_workstruct *ws ); | 26 | ssize_t http_handle_request(const int64 s, struct ot_workstruct *ws); |
| 24 | ssize_t http_sendiovecdata( const int64 s, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector ); | 27 | ssize_t http_sendiovecdata(const int64 s, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial); |
| 25 | ssize_t http_issue_error( const int64 s, struct ot_workstruct *ws, int code ); | 28 | ssize_t http_issue_error(const int64 s, struct ot_workstruct *ws, int code); |
| 26 | 29 | ||
| 27 | extern char *g_stats_path; | 30 | extern char *g_stats_path; |
| 28 | extern ssize_t g_stats_path_len; | 31 | extern ssize_t g_stats_path_len; |
| @@ -4,73 +4,89 @@ | |||
| 4 | $id$ */ | 4 | $id$ */ |
| 5 | 5 | ||
| 6 | /* System */ | 6 | /* System */ |
| 7 | #include <sys/types.h> | ||
| 8 | #include <stdlib.h> | 7 | #include <stdlib.h> |
| 9 | #include <unistd.h> | 8 | #include <sys/types.h> |
| 10 | #include <sys/uio.h> | 9 | #include <sys/uio.h> |
| 10 | #include <unistd.h> | ||
| 11 | 11 | ||
| 12 | /* Libowfat */ | 12 | /* Libowfat */ |
| 13 | 13 | ||
| 14 | /* Opentracker */ | 14 | /* Opentracker */ |
| 15 | #include "ot_iovec.h" | 15 | #include "ot_iovec.h" |
| 16 | 16 | ||
| 17 | void *iovec_increase( int *iovec_entries, struct iovec **iovector, size_t new_alloc ) { | 17 | void *iovec_increase(int *iovec_entries, struct iovec **iovector, size_t new_alloc) { |
| 18 | void *new_data; | 18 | void *new_data; |
| 19 | int new_entries = 1 + *iovec_entries; | 19 | int new_entries = 1 + *iovec_entries; |
| 20 | struct iovec *new_vec = realloc( *iovector, new_entries * sizeof( struct iovec ) ); | 20 | struct iovec *new_vec = realloc(*iovector, new_entries * sizeof(struct iovec)); |
| 21 | 21 | ||
| 22 | if( !new_vec ) | 22 | if (!new_vec) |
| 23 | return NULL; | 23 | return NULL; |
| 24 | 24 | ||
| 25 | /* Only allocate after we have a place to store the pointer */ | 25 | /* Only allocate after we have a place to store the pointer */ |
| 26 | new_data = malloc( new_alloc ); | 26 | new_data = malloc(new_alloc); |
| 27 | if( !new_data ) | 27 | if (!new_data) |
| 28 | return NULL; | 28 | return NULL; |
| 29 | 29 | ||
| 30 | new_vec[new_entries - 1].iov_base = new_data; | 30 | new_vec[new_entries - 1].iov_base = new_data; |
| 31 | new_vec[new_entries - 1].iov_len = new_alloc; | 31 | new_vec[new_entries - 1].iov_len = new_alloc; |
| 32 | 32 | ||
| 33 | *iovector = new_vec; | 33 | *iovector = new_vec; |
| 34 | ++*iovec_entries; | 34 | ++*iovec_entries; |
| 35 | return new_data; | 35 | return new_data; |
| 36 | } | 36 | } |
| 37 | 37 | ||
| 38 | void iovec_free( int *iovec_entries, struct iovec **iovector ) { | 38 | void *iovec_append(int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector) { |
| 39 | int new_entries = *iovec_entries + 1; | ||
| 40 | struct iovec *new_vec = realloc(*iovector, new_entries * sizeof(struct iovec)); | ||
| 41 | if (!new_vec) | ||
| 42 | return NULL; | ||
| 43 | |||
| 44 | /* Take over data from appended iovec */ | ||
| 45 | new_vec[*iovec_entries].iov_base = append_iovector->iov_base; | ||
| 46 | new_vec[*iovec_entries].iov_len = append_iovector->iov_len; | ||
| 47 | |||
| 48 | append_iovector->iov_base = NULL; | ||
| 49 | append_iovector->iov_len = 0; | ||
| 50 | |||
| 51 | *iovector = new_vec; | ||
| 52 | *iovec_entries = new_entries; | ||
| 53 | |||
| 54 | return new_vec; | ||
| 55 | } | ||
| 56 | |||
| 57 | void iovec_free(int *iovec_entries, struct iovec **iovector) { | ||
| 39 | int i; | 58 | int i; |
| 40 | for( i=0; i<*iovec_entries; ++i ) | 59 | for (i = 0; i < *iovec_entries; ++i) |
| 41 | free( ((*iovector)[i]).iov_base ); | 60 | free(((*iovector)[i]).iov_base); |
| 42 | *iovector = NULL; | 61 | *iovector = NULL; |
| 43 | *iovec_entries = 0; | 62 | *iovec_entries = 0; |
| 44 | } | 63 | } |
| 45 | 64 | ||
| 46 | void iovec_fixlast( int *iovec_entries, struct iovec **iovector, void *last_ptr ) { | 65 | void iovec_fixlast(int *iovec_entries, struct iovec **iovector, void *last_ptr) { |
| 47 | if( *iovec_entries ) { | 66 | if (*iovec_entries) { |
| 48 | char * base = (char*)((*iovector)[ *iovec_entries - 1 ]).iov_base; | 67 | char *base = (char *)((*iovector)[*iovec_entries - 1]).iov_base; |
| 49 | size_t new_alloc = ((char*)last_ptr) - base; | 68 | size_t new_alloc = ((char *)last_ptr) - base; |
| 50 | 69 | ||
| 51 | ((*iovector)[*iovec_entries - 1 ]).iov_base = realloc( base, new_alloc ); | 70 | ((*iovector)[*iovec_entries - 1]).iov_base = realloc(base, new_alloc); |
| 52 | ((*iovector)[*iovec_entries - 1 ]).iov_len = new_alloc; | 71 | ((*iovector)[*iovec_entries - 1]).iov_len = new_alloc; |
| 53 | } | 72 | } |
| 54 | } | 73 | } |
| 55 | 74 | ||
| 56 | void *iovec_fix_increase_or_free( int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc ) { | 75 | void *iovec_fix_increase_or_free(int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc) { |
| 57 | void *new_data; | 76 | void *new_data; |
| 58 | 77 | ||
| 59 | iovec_fixlast( iovec_entries, iovector, last_ptr ); | 78 | iovec_fixlast(iovec_entries, iovector, last_ptr); |
| 60 | 79 | ||
| 61 | if( !( new_data = iovec_increase( iovec_entries, iovector, new_alloc ) ) ) | 80 | if (!(new_data = iovec_increase(iovec_entries, iovector, new_alloc))) |
| 62 | iovec_free( iovec_entries, iovector ); | 81 | iovec_free(iovec_entries, iovector); |
| 63 | 82 | ||
| 64 | return new_data; | 83 | return new_data; |
| 65 | } | 84 | } |
| 66 | 85 | ||
| 67 | 86 | size_t iovec_length(const int *iovec_entries, const struct iovec **iovector) { | |
| 68 | size_t iovec_length( const int *iovec_entries, const struct iovec **iovector ) { | ||
| 69 | size_t length = 0; | 87 | size_t length = 0; |
| 70 | int i; | 88 | int i; |
| 71 | for( i=0; i<*iovec_entries; ++i ) | 89 | for (i = 0; i < *iovec_entries; ++i) |
| 72 | length += ((*iovector)[i]).iov_len; | 90 | length += ((*iovector)[i]).iov_len; |
| 73 | return length; | 91 | return length; |
| 74 | } | 92 | } |
| 75 | |||
| 76 | const char *g_version_iovec_c = "$Source$: $Revision$\n"; | ||
| @@ -8,12 +8,13 @@ | |||
| 8 | 8 | ||
| 9 | #include <sys/uio.h> | 9 | #include <sys/uio.h> |
| 10 | 10 | ||
| 11 | void *iovec_increase( int *iovec_entries, struct iovec **iovector, size_t new_alloc ); | 11 | void *iovec_increase(int *iovec_entries, struct iovec **iovector, size_t new_alloc); |
| 12 | void iovec_fixlast( int *iovec_entries, struct iovec **iovector, void *last_ptr ); | 12 | void *iovec_append(int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector); |
| 13 | void iovec_free( int *iovec_entries, struct iovec **iovector ); | 13 | void iovec_fixlast(int *iovec_entries, struct iovec **iovector, void *last_ptr); |
| 14 | void iovec_free(int *iovec_entries, struct iovec **iovector); | ||
| 14 | 15 | ||
| 15 | size_t iovec_length( const int *iovec_entries, const struct iovec **iovector ); | 16 | size_t iovec_length(const int *iovec_entries, const struct iovec **iovector); |
| 16 | 17 | ||
| 17 | void *iovec_fix_increase_or_free( int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc ); | 18 | void *iovec_fix_increase_or_free(int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc); |
| 18 | 19 | ||
| 19 | #endif | 20 | #endif |
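With the header in view, the intended life cycle of the iovec helpers, including the new iovec_append(), is roughly as follows. This is a usage sketch, not code from the tree; the names iovec_demo and ready_chunk are illustrative:

    #include <stdio.h>
    #include <sys/uio.h>
    #include "ot_iovec.h"

    /* Grow a vector chunk by chunk, trim the last chunk to what was actually written,
       and adopt a separately prepared chunk through iovec_append().  On success the
       source iovec is zeroed, so ownership moves to the vector and a single
       iovec_free() releases everything. */
    static void iovec_demo(struct iovec *ready_chunk) {
      int           entries = 0;
      struct iovec *vec     = NULL;
      char         *p       = iovec_increase(&entries, &vec, 4096);

      if (!p)
        return;
      p += sprintf(p, "d5:filesd");               /* write less than was allocated  */
      iovec_fixlast(&entries, &vec, p);           /* shrink last chunk to used size */

      if (!iovec_append(&entries, &vec, ready_chunk)) {
        /* append failed: ready_chunk still owns its memory, the caller keeps it */
      }

      /* ... writev() the vector here ... */

      iovec_free(&entries, &vec);                 /* frees every iov_base in the vector */
    }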
diff --git a/ot_livesync.c b/ot_livesync.c index 75a5f9f..269b8d8 100644 --- a/ot_livesync.c +++ b/ot_livesync.c | |||
| @@ -4,204 +4,228 @@ | |||
| 4 | $id$ */ | 4 | $id$ */ |
| 5 | 5 | ||
| 6 | /* System */ | 6 | /* System */ |
| 7 | #include <pthread.h> | ||
| 8 | #include <stdlib.h> | ||
| 9 | #include <string.h> | ||
| 7 | #include <sys/types.h> | 10 | #include <sys/types.h> |
| 8 | #include <sys/uio.h> | 11 | #include <sys/uio.h> |
| 9 | #include <string.h> | ||
| 10 | #include <pthread.h> | ||
| 11 | #include <unistd.h> | 12 | #include <unistd.h> |
| 12 | #include <stdlib.h> | ||
| 13 | 13 | ||
| 14 | /* Libowfat */ | 14 | /* Libowfat */ |
| 15 | #include "socket.h" | ||
| 16 | #include "ndelay.h" | ||
| 17 | #include "byte.h" | 15 | #include "byte.h" |
| 18 | #include "ip6.h" | 16 | #include "ip6.h" |
| 17 | #include "ndelay.h" | ||
| 18 | #include "socket.h" | ||
| 19 | 19 | ||
| 20 | /* Opentracker */ | 20 | /* Opentracker */ |
| 21 | #include "trackerlogic.h" | ||
| 22 | #include "ot_livesync.h" | ||
| 23 | #include "ot_accesslist.h" | 21 | #include "ot_accesslist.h" |
| 24 | #include "ot_stats.h" | 22 | #include "ot_livesync.h" |
| 25 | #include "ot_mutex.h" | 23 | #include "ot_mutex.h" |
| 24 | #include "ot_stats.h" | ||
| 25 | #include "trackerlogic.h" | ||
| 26 | 26 | ||
| 27 | #ifdef WANT_SYNC_LIVE | 27 | #ifdef WANT_SYNC_LIVE |
| 28 | 28 | ||
| 29 | char groupip_1[4] = { 224,0,23,5 }; | 29 | char groupip_1[4] = {224, 0, 23, 5}; |
| 30 | 30 | ||
| 31 | #define LIVESYNC_INCOMING_BUFFSIZE (256*256) | 31 | #define LIVESYNC_INCOMING_BUFFSIZE (256 * 256) |
| 32 | 32 | ||
| 33 | #define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480 | 33 | #define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480 |
| 34 | #define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer)+sizeof(ot_hash)) | 34 | #define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer) + sizeof(ot_hash)) |
| 35 | 35 | ||
| 36 | #define LIVESYNC_MAXDELAY 15 /* seconds */ | 36 | #define LIVESYNC_MAXDELAY 15 /* seconds */ |
| 37 | 37 | ||
| 38 | enum { OT_SYNC_PEER }; | 38 | enum { OT_SYNC_PEER4, OT_SYNC_PEER6 }; |
| 39 | 39 | ||
| 40 | /* Forward declaration */ | 40 | /* Forward declaration */ |
| 41 | static void * livesync_worker( void * args ); | 41 | static void *livesync_worker(void *args); |
| 42 | 42 | ||
| 43 | /* For outgoing packets */ | 43 | /* For outgoing packets */ |
| 44 | static int64 g_socket_in = -1; | 44 | static int64 g_socket_in = -1; |
| 45 | 45 | ||
| 46 | /* For incoming packets */ | 46 | /* For incoming packets */ |
| 47 | static int64 g_socket_out = -1; | 47 | static int64 g_socket_out = -1; |
| 48 | 48 | ||
| 49 | static pthread_mutex_t g_outbuf_mutex = PTHREAD_MUTEX_INITIALIZER; | 49 | static pthread_mutex_t g_outbuf_mutex = PTHREAD_MUTEX_INITIALIZER; |
| 50 | char g_outbuf[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; | 50 | typedef struct { |
| 51 | static size_t g_outbuf_data; | 51 | uint8_t data[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; |
| 52 | static ot_time g_next_packet_time; | 52 | size_t fill; |
| 53 | ot_time next_packet_time; | ||
| 54 | } sync_buffer; | ||
| 53 | 55 | ||
| 54 | static pthread_t thread_id; | 56 | static sync_buffer g_v6_buf; |
| 55 | void livesync_init( ) { | 57 | static sync_buffer g_v4_buf; |
| 56 | 58 | ||
| 57 | if( g_socket_in == -1 ) | 59 | static pthread_t thread_id; |
| 58 | exerr( "No socket address for live sync specified." ); | 60 | void livesync_init() { |
| 61 | |||
| 62 | if (g_socket_in == -1) | ||
| 63 | exerr("No socket address for live sync specified."); | ||
| 59 | 64 | ||
| 60 | /* Prepare outgoing peers buffer */ | 65 | /* Prepare outgoing peers buffer */ |
| 61 | memcpy( g_outbuf, &g_tracker_id, sizeof( g_tracker_id ) ); | 66 | memcpy(g_v6_buf.data, &g_tracker_id, sizeof(g_tracker_id)); |
| 62 | uint32_pack_big( g_outbuf + sizeof( g_tracker_id ), OT_SYNC_PEER); | 67 | memcpy(g_v4_buf.data, &g_tracker_id, sizeof(g_tracker_id)); |
| 63 | g_outbuf_data = sizeof( g_tracker_id ) + sizeof( uint32_t ); | 68 | |
| 69 | uint32_pack_big((char *)g_v6_buf.data + sizeof(g_tracker_id), OT_SYNC_PEER6); | ||
| 70 | uint32_pack_big((char *)g_v4_buf.data + sizeof(g_tracker_id), OT_SYNC_PEER4); | ||
| 71 | |||
| 72 | g_v6_buf.fill = sizeof(g_tracker_id) + sizeof(uint32_t); | ||
| 73 | g_v4_buf.fill = sizeof(g_tracker_id) + sizeof(uint32_t); | ||
| 64 | 74 | ||
| 65 | g_next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; | 75 | g_v6_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; |
| 76 | g_v4_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; | ||
| 66 | 77 | ||
| 67 | pthread_create( &thread_id, NULL, livesync_worker, NULL ); | 78 | pthread_create(&thread_id, NULL, livesync_worker, NULL); |
| 68 | } | 79 | } |
| 69 | 80 | ||
| 70 | void livesync_deinit() { | 81 | void livesync_deinit() { |
| 71 | if( g_socket_in != -1 ) | 82 | if (g_socket_in != -1) |
| 72 | close( g_socket_in ); | 83 | close(g_socket_in); |
| 73 | if( g_socket_out != -1 ) | 84 | if (g_socket_out != -1) |
| 74 | close( g_socket_out ); | 85 | close(g_socket_out); |
| 75 | 86 | ||
| 76 | pthread_cancel( thread_id ); | 87 | pthread_cancel(thread_id); |
| 77 | } | 88 | } |
| 78 | 89 | ||
| 79 | void livesync_bind_mcast( ot_ip6 ip, uint16_t port) { | 90 | void livesync_bind_mcast(ot_ip6 ip, uint16_t port) { |
| 80 | char tmpip[4] = {0,0,0,0}; | 91 | char tmpip[4] = {0, 0, 0, 0}; |
| 81 | char *v4ip; | 92 | char *v4ip; |
| 82 | 93 | ||
| 83 | if( !ip6_isv4mapped(ip)) | 94 | if (!ip6_isv4mapped(ip)) |
| 84 | exerr("v6 mcast support not yet available."); | 95 | exerr("v6 mcast support not yet available."); |
| 85 | v4ip = ip+12; | 96 | v4ip = ip + 12; |
| 86 | 97 | ||
| 87 | if( g_socket_in != -1 ) | 98 | if (g_socket_in != -1) |
| 88 | exerr("Error: Livesync listen ip specified twice."); | 99 | exerr("Error: Livesync listen ip specified twice."); |
| 89 | 100 | ||
| 90 | if( ( g_socket_in = socket_udp4( )) < 0) | 101 | if ((g_socket_in = socket_udp4()) < 0) |
| 91 | exerr("Error: Cant create live sync incoming socket." ); | 102 | exerr("Error: Cant create live sync incoming socket."); |
| 92 | ndelay_off(g_socket_in); | 103 | ndelay_off(g_socket_in); |
| 93 | 104 | ||
| 94 | if( socket_bind4_reuse( g_socket_in, tmpip, port ) == -1 ) | 105 | if (socket_bind4_reuse(g_socket_in, tmpip, port) == -1) |
| 95 | exerr("Error: Cant bind live sync incoming socket." ); | 106 | exerr("Error: Cant bind live sync incoming socket."); |
| 96 | 107 | ||
| 97 | if( socket_mcjoin4( g_socket_in, groupip_1, v4ip ) ) | 108 | if (socket_mcjoin4(g_socket_in, groupip_1, v4ip)) |
| 98 | exerr("Error: Cant make live sync incoming socket join mcast group."); | 109 | exerr("Error: Cant make live sync incoming socket join mcast group."); |
| 99 | 110 | ||
| 100 | if( ( g_socket_out = socket_udp4()) < 0) | 111 | if ((g_socket_out = socket_udp4()) < 0) |
| 101 | exerr("Error: Cant create live sync outgoing socket." ); | 112 | exerr("Error: Cant create live sync outgoing socket."); |
| 102 | if( socket_bind4_reuse( g_socket_out, v4ip, port ) == -1 ) | 113 | if (socket_bind4_reuse(g_socket_out, v4ip, port) == -1) |
| 103 | exerr("Error: Cant bind live sync outgoing socket." ); | 114 | exerr("Error: Cant bind live sync outgoing socket."); |
| 104 | 115 | ||
| 105 | socket_mcttl4(g_socket_out, 1); | 116 | socket_mcttl4(g_socket_out, 1); |
| 106 | socket_mcloop4(g_socket_out, 0); | 117 | socket_mcloop4(g_socket_out, 0); |
| 107 | } | 118 | } |
| 108 | 119 | ||
| 109 | /* Caller MUST hold g_outbuf_mutex. Returns with g_outbuf_mutex unlocked */ | 120 | /* Caller MUST hold g_outbuf_mutex. Returns with g_outbuf_mutex unlocked */ |
| 110 | static void livesync_issue_peersync( ) { | 121 | static void livesync_issue_peersync(sync_buffer *buf) { |
| 111 | char mycopy[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; | 122 | char mycopy[LIVESYNC_OUTGOING_BUFFSIZE_PEERS]; |
| 112 | size_t data = g_outbuf_data; | 123 | size_t fill = buf->fill; |
| 113 | 124 | ||
| 114 | memcpy( mycopy, g_outbuf, data ); | 125 | memcpy(mycopy, buf->data, fill); |
| 115 | g_outbuf_data = sizeof( g_tracker_id ) + sizeof( uint32_t ); | 126 | buf->fill = sizeof(g_tracker_id) + sizeof(uint32_t); |
| 116 | g_next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; | 127 | buf->next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY; |
| 117 | 128 | ||
| 118 | /* From now this thread has a local copy of the buffer and | 129 | /* From now this thread has a local copy of the buffer and |
| 119 | has modified the protected element */ | 130 | has modified the protected element */ |
| 120 | pthread_mutex_unlock(&g_outbuf_mutex); | 131 | pthread_mutex_unlock(&g_outbuf_mutex); |
| 121 | 132 | ||
| 122 | socket_send4(g_socket_out, mycopy, data, groupip_1, LIVESYNC_PORT); | 133 | socket_send4(g_socket_out, mycopy, fill, groupip_1, LIVESYNC_PORT); |
| 123 | } | 134 | } |
| 124 | 135 | ||
| 125 | static void livesync_handle_peersync( struct ot_workstruct *ws ) { | 136 | static void livesync_handle_peersync(struct ot_workstruct *ws, size_t peer_size) { |
| 126 | int off = sizeof( g_tracker_id ) + sizeof( uint32_t ); | 137 | size_t off = sizeof(g_tracker_id) + sizeof(uint32_t); |
| 127 | 138 | ||
| 128 | /* Now basic sanity checks have been done on the live sync packet | 139 | /* Now basic sanity checks have been done on the live sync packet |
| 129 | We might add more testing and logging. */ | 140 | We might add more testing and logging. */ |
| 130 | while( off + (ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ) <= ws->request_size ) { | 141 | while ((ssize_t)(off + sizeof(ot_hash) + peer_size) <= ws->request_size) { |
| 131 | memcpy( &ws->peer, ws->request + off + sizeof(ot_hash), sizeof( ot_peer ) ); | 142 | memcpy(&ws->peer, ws->request + off + sizeof(ot_hash), peer_size); |
| 132 | ws->hash = (ot_hash*)(ws->request + off); | 143 | ws->hash = (ot_hash *)(ws->request + off); |
| 133 | 144 | ||
| 134 | if( !g_opentracker_running ) return; | 145 | if (!g_opentracker_running) |
| 146 | return; | ||
| 135 | 147 | ||
| 136 | if( OT_PEERFLAG(&ws->peer) & PEER_FLAG_STOPPED ) | 148 | if (OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED) |
| 137 | remove_peer_from_torrent( FLAG_MCA, ws ); | 149 | remove_peer_from_torrent(FLAG_MCA, ws); |
| 138 | else | 150 | else |
| 139 | add_peer_to_torrent_and_return_peers( FLAG_MCA, ws, /* amount = */ 0 ); | 151 | add_peer_to_torrent_and_return_peers(FLAG_MCA, ws, /* amount = */ 0); |
| 140 | 152 | ||
| 141 | off += sizeof( ot_hash ) + sizeof( ot_peer ); | 153 | off += sizeof(ot_hash) + peer_size; |
| 142 | } | 154 | } |
| 143 | 155 | ||
| 144 | stats_issue_event(EVENT_SYNC, 0, | 156 | stats_issue_event(EVENT_SYNC, 0, (ws->request_size - sizeof(g_tracker_id) - sizeof(uint32_t)) / ((ssize_t)sizeof(ot_hash) + peer_size)); |
| 145 | (ws->request_size - sizeof( g_tracker_id ) - sizeof( uint32_t ) ) / | ||
| 146 | ((ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ))); | ||
| 147 | } | 157 | } |
| 148 | 158 | ||
| 149 | /* Tickle the live sync module from time to time, so no events get | 159 | /* Tickle the live sync module from time to time, so no events get |
| 150 | stuck when there's not enough traffic to fill udp packets fast | 160 | stuck when there's not enough traffic to fill udp packets fast |
| 151 | enough */ | 161 | enough */ |
| 152 | void livesync_ticker( ) { | 162 | void livesync_ticker() { |
| 153 | /* livesync_issue_peersync sets g_next_packet_time */ | 163 | /* livesync_issue_peersync sets g_next_packet_time */ |
| 154 | pthread_mutex_lock(&g_outbuf_mutex); | 164 | pthread_mutex_lock(&g_outbuf_mutex); |
| 155 | if( g_now_seconds > g_next_packet_time && | 165 | if (g_now_seconds > g_v6_buf.next_packet_time && g_v6_buf.fill > sizeof(g_tracker_id) + sizeof(uint32_t)) |
| 156 | g_outbuf_data > sizeof( g_tracker_id ) + sizeof( uint32_t ) ) | 166 | livesync_issue_peersync(&g_v6_buf); |
| 157 | livesync_issue_peersync(); | 167 | else |
| 168 | pthread_mutex_unlock(&g_outbuf_mutex); | ||
| 169 | |||
| 170 | pthread_mutex_lock(&g_outbuf_mutex); | ||
| 171 | if (g_now_seconds > g_v4_buf.next_packet_time && g_v4_buf.fill > sizeof(g_tracker_id) + sizeof(uint32_t)) | ||
| 172 | livesync_issue_peersync(&g_v4_buf); | ||
| 158 | else | 173 | else |
| 159 | pthread_mutex_unlock(&g_outbuf_mutex); | 174 | pthread_mutex_unlock(&g_outbuf_mutex); |
| 160 | } | 175 | } |
| 161 | 176 | ||
| 162 | /* Inform live sync about what's going on. */ | 177 | /* Inform live sync about what's going on. */ |
| 162 | /* Inform live sync about what's going on. */ | 177 | /* Inform live sync about what's going on. */ |
| 163 | void livesync_tell( struct ot_workstruct *ws ) { | 178 | void livesync_tell(struct ot_workstruct *ws) { |
| 179 | size_t peer_size; /* initialized in next line */ | ||
| 180 | ot_peer *peer_src = peer_from_peer6(&ws->peer, &peer_size); | ||
| 181 | sync_buffer *dest_buf = peer_size == OT_PEER_SIZE6 ? &g_v6_buf : &g_v4_buf; | ||
| 182 | |||
| 164 | pthread_mutex_lock(&g_outbuf_mutex); | 183 | pthread_mutex_lock(&g_outbuf_mutex); |
| 165 | 184 | ||
| 166 | memcpy( g_outbuf + g_outbuf_data, ws->hash, sizeof(ot_hash) ); | 185 | memcpy(dest_buf->data + dest_buf->fill, ws->hash, sizeof(ot_hash)); |
| 167 | memcpy( g_outbuf + g_outbuf_data + sizeof(ot_hash), &ws->peer, sizeof(ot_peer) ); | 186 | dest_buf->fill += sizeof(ot_hash); |
| 168 | 187 | ||
| 169 | g_outbuf_data += sizeof(ot_hash) + sizeof(ot_peer); | 188 | memcpy(dest_buf->data + dest_buf->fill, peer_src, peer_size); |
| 189 | dest_buf->fill += peer_size; | ||
| 170 | 190 | ||
| 171 | if( g_outbuf_data >= LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS ) | 191 | if (dest_buf->fill >= LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS) |
| 172 | livesync_issue_peersync(); | 192 | livesync_issue_peersync(dest_buf); |
| 173 | else | 193 | else |
| 174 | pthread_mutex_unlock(&g_outbuf_mutex); | 194 | pthread_mutex_unlock(&g_outbuf_mutex); |
| 175 | } | 195 | } |
| 176 | 196 | ||
| 177 | static void * livesync_worker( void * args ) { | 197 | static void *livesync_worker(void *args) { |
| 178 | struct ot_workstruct ws; | 198 | struct ot_workstruct ws; |
| 179 | ot_ip6 in_ip; uint16_t in_port; | 199 | ot_ip6 in_ip; |
| 200 | uint16_t in_port; | ||
| 180 | 201 | ||
| 181 | (void)args; | 202 | (void)args; |
| 182 | 203 | ||
| 183 | /* Initialize our "thread local storage" */ | 204 | /* Initialize our "thread local storage" */ |
| 184 | ws.inbuf = ws.request = malloc( LIVESYNC_INCOMING_BUFFSIZE ); | 205 | ws.inbuf = ws.request = malloc(LIVESYNC_INCOMING_BUFFSIZE); |
| 185 | ws.outbuf = ws.reply = 0; | 206 | ws.outbuf = ws.reply = 0; |
| 186 | 207 | ||
| 187 | memcpy( in_ip, V4mappedprefix, sizeof( V4mappedprefix ) ); | 208 | memcpy(in_ip, V4mappedprefix, sizeof(V4mappedprefix)); |
| 188 | 209 | ||
| 189 | while( 1 ) { | 210 | while (1) { |
| 190 | ws.request_size = socket_recv4(g_socket_in, (char*)ws.inbuf, LIVESYNC_INCOMING_BUFFSIZE, 12+(char*)in_ip, &in_port); | 211 | ws.request_size = socket_recv4(g_socket_in, (char *)ws.inbuf, LIVESYNC_INCOMING_BUFFSIZE, 12 + (char *)in_ip, &in_port); |
| 191 | 212 | ||
| 192 | /* Expect at least tracker id and packet type */ | 213 | /* Expect at least tracker id and packet type */ |
| 193 | if( ws.request_size <= (ssize_t)(sizeof( g_tracker_id ) + sizeof( uint32_t )) ) | 214 | if (ws.request_size <= (ssize_t)(sizeof(g_tracker_id) + sizeof(uint32_t))) |
| 194 | continue; | 215 | continue; |
| 195 | if( !accesslist_is_blessed(in_ip, OT_PERMISSION_MAY_LIVESYNC)) | 216 | if (!accesslist_is_blessed(in_ip, OT_PERMISSION_MAY_LIVESYNC)) |
| 196 | continue; | 217 | continue; |
| 197 | if( !memcmp( ws.inbuf, &g_tracker_id, sizeof( g_tracker_id ) ) ) { | 218 | if (!memcmp(ws.inbuf, &g_tracker_id, sizeof(g_tracker_id))) { |
| 198 | /* TODO: log packet coming from ourselves */ | 219 | /* TODO: log packet coming from ourselves */ |
| 199 | continue; | 220 | continue; |
| 200 | } | 221 | } |
| 201 | 222 | ||
| 202 | switch( uint32_read_big( sizeof( g_tracker_id ) + (char *)ws.inbuf ) ) { | 223 | switch (uint32_read_big(sizeof(g_tracker_id) + (char *)ws.inbuf)) { |
| 203 | case OT_SYNC_PEER: | 224 | case OT_SYNC_PEER6: |
| 204 | livesync_handle_peersync( &ws ); | 225 | livesync_handle_peersync(&ws, OT_PEER_SIZE6); |
| 226 | break; | ||
| 227 | case OT_SYNC_PEER4: | ||
| 228 | livesync_handle_peersync(&ws, OT_PEER_SIZE4); | ||
| 205 | break; | 229 | break; |
| 206 | default: | 230 | default: |
| 207 | break; | 231 | break; |
| @@ -213,4 +237,3 @@ static void * livesync_worker( void * args ) { | |||
| 213 | } | 237 | } |
| 214 | 238 | ||
| 215 | #endif | 239 | #endif |
| 216 | const char *g_version_livesync_c = "$Source$: $Revision$\n"; | ||
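The split into per-family sync buffers keeps the flush idiom of livesync_issue_peersync(): the caller holds the mutex, the function snapshots and resets the shared buffer, drops the lock, and only then performs the slow multicast send on its private copy. A sketch of that pattern with illustrative names (demo_buffer, demo_flush, and the 1480-byte size are assumptions of this sketch):

    #include <pthread.h>
    #include <stdint.h>
    #include <string.h>

    typedef struct {
      uint8_t data[1480];
      size_t  fill;
    } demo_buffer;

    static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

    /* Caller holds demo_mutex; returns with it unlocked. */
    static void demo_flush(demo_buffer *buf, size_t header_size) {
      uint8_t copy[sizeof(buf->data)];
      size_t  fill = buf->fill;

      memcpy(copy, buf->data, fill);
      buf->fill = header_size;           /* keep tracker id + packet type for the next round */

      pthread_mutex_unlock(&demo_mutex); /* nothing shared is touched from here on */

      /* socket_send4(sock, (char *)copy, fill, groupip_1, LIVESYNC_PORT); */
      (void)copy;
      (void)fill;
    }

Holding the lock only for the memcpy keeps announce-handling threads from stalling behind a multicast send.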
diff --git a/ot_livesync.h b/ot_livesync.h index d7490e5..cb28774 100644 --- a/ot_livesync.h +++ b/ot_livesync.h | |||
| @@ -28,13 +28,19 @@ | |||
| 28 | Each tracker instance accumulates announce requests until its buffer is | 28 | Each tracker instance accumulates announce requests until its buffer is |
| 29 | full or a timeout is reached. Then it broadcasts its live sync packet: | 29 | full or a timeout is reached. Then it broadcasts its live sync packet: |
| 30 | 30 | ||
| 31 | packet type SYNC_LIVE | 31 | packet type SYNC_LIVE4 |
| 32 | [ 0x0008 0x14 info_hash | 32 | [ 0x0008 0x14 info_hash |
| 33 | 0x001c 0x04 peer's ipv4 address | 33 | 0x001c 0x04 peer's ipv4 address |
| 34 | 0x0020 0x02 peer's port | 34 | 0x0020 0x02 peer's port |
| 35 | 0x0024 0x02 peer flags v1 ( SEEDING = 0x80, COMPLETE = 0x40, STOPPED = 0x20 ) | 35 | 0x0024 0x02 peer flags v1 ( SEEDING = 0x80, COMPLETE = 0x40, STOPPED = 0x20 ) |
| 36 | ]* | 36 | ]* |
| 37 | 37 | ||
| 38 | packet type SYNC_LIVE6 | ||
| 39 | [ 0x0008 0x14 info_hash | ||
| 40 | 0x001c 0x10 peer's ipv6 address | ||
| 41 | 0x002c 0x02 peer's port | ||
| 42 | 0x002e 0x02 peer flags v1 ( SEEDING = 0x80, COMPLETE = 0x40, STOPPED = 0x20 ) | ||
| 43 | ]* | ||
| 38 | */ | 44 | */ |
| 39 | 45 | ||
| 40 | #ifdef WANT_SYNC_LIVE | 46 | #ifdef WANT_SYNC_LIVE |
| @@ -45,18 +51,18 @@ void livesync_init(); | |||
| 45 | void livesync_deinit(); | 51 | void livesync_deinit(); |
| 46 | 52 | ||
| 47 | /* Join multicast group for listening and create sending socket */ | 53 | /* Join multicast group for listening and create sending socket */ |
| 48 | void livesync_bind_mcast( char *ip, uint16_t port ); | 54 | void livesync_bind_mcast(char *ip, uint16_t port); |
| 49 | 55 | ||
| 50 | /* Inform live sync about what's going on. */ | 56 | /* Inform live sync about what's going on. */ |
| 51 | void livesync_tell( struct ot_workstruct *ws ); | 57 | void livesync_tell(struct ot_workstruct *ws); |
| 52 | 58 | ||
| 53 | /* Tickle the live sync module from time to time, so no events get | 59 | /* Tickle the live sync module from time to time, so no events get |
| 54 | stuck when there's not enough traffic to fill udp packets fast | 60 | stuck when there's not enough traffic to fill udp packets fast |
| 55 | enough */ | 61 | enough */ |
| 56 | void livesync_ticker( ); | 62 | void livesync_ticker(); |
| 57 | 63 | ||
| 58 | /* Handle an incoming live sync packet */ | 64 | /* Handle an incoming live sync packet */ |
| 59 | void handle_livesync( const int64 sock ); | 65 | void handle_livesync(const int64 sock); |
| 60 | 66 | ||
| 61 | #else | 67 | #else |
| 62 | 68 | ||
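Given the layouts documented above, a receiver walks such a packet the way livesync_handle_peersync() does: after the 4-byte tracker id and the 4-byte packet type, fixed-size "info_hash + peer" records simply repeat until the datagram ends. A hedged sketch; walk_peersync is illustrative, peer_size would be OT_PEER_SIZE4 or OT_PEER_SIZE6 depending on the packet type, and 20 is the info_hash length spelled 0x14 above:

    #include <stddef.h>
    #include <stdint.h>

    static void walk_peersync(const uint8_t *pkt, size_t pkt_len, size_t peer_size) {
      size_t off = 4 /* tracker id */ + 4 /* packet type */;

      while (off + 20 + peer_size <= pkt_len) {
        const uint8_t *info_hash = pkt + off;
        const uint8_t *peer      = pkt + off + 20;

        /* hand info_hash / peer to the torrent logic here */
        (void)info_hash;
        (void)peer;

        off += 20 + peer_size;
      }
    }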
| @@ -16,42 +16,39 @@ | |||
| 16 | #include "uint32.h" | 16 | #include "uint32.h" |
| 17 | 17 | ||
| 18 | /* Opentracker */ | 18 | /* Opentracker */ |
| 19 | #include "trackerlogic.h" | 19 | #include "ot_iovec.h" |
| 20 | #include "ot_mutex.h" | 20 | #include "ot_mutex.h" |
| 21 | #include "ot_stats.h" | 21 | #include "ot_stats.h" |
| 22 | #include "trackerlogic.h" | ||
| 22 | 23 | ||
| 23 | /* #define MTX_DBG( STRING ) fprintf( stderr, STRING ) */ | 24 | /* #define MTX_DBG( STRING ) fprintf( stderr, STRING ) */ |
| 24 | #define MTX_DBG( STRING ) | 25 | #define MTX_DBG(STRING) |
| 25 | 26 | ||
| 26 | /* Our global all torrents list */ | 27 | /* Our global all torrents list */ |
| 27 | static ot_vector all_torrents[OT_BUCKET_COUNT]; | 28 | static ot_vector all_torrents[OT_BUCKET_COUNT]; |
| 28 | static pthread_mutex_t bucket_mutex[OT_BUCKET_COUNT]; | 29 | static pthread_mutex_t bucket_mutex[OT_BUCKET_COUNT]; |
| 29 | static size_t g_torrent_count; | 30 | static size_t g_torrent_count; |
| 30 | 31 | ||
| 31 | /* Self pipe from opentracker.c */ | 32 | /* Self pipe from opentracker.c */ |
| 32 | extern int g_self_pipe[2]; | 33 | extern int g_self_pipe[2]; |
| 33 | 34 | ||
| 34 | ot_vector *mutex_bucket_lock( int bucket ) { | 35 | ot_vector *mutex_bucket_lock(int bucket) { |
| 35 | pthread_mutex_lock(bucket_mutex + bucket ); | 36 | pthread_mutex_lock(bucket_mutex + bucket); |
| 36 | return all_torrents + bucket; | 37 | return all_torrents + bucket; |
| 37 | } | 38 | } |
| 38 | 39 | ||
| 39 | ot_vector *mutex_bucket_lock_by_hash( ot_hash hash ) { | 40 | ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash) { return mutex_bucket_lock(uint32_read_big((const char *)hash) >> OT_BUCKET_COUNT_SHIFT); } |
| 40 | return mutex_bucket_lock( uint32_read_big( (char*)hash ) >> OT_BUCKET_COUNT_SHIFT ); | ||
| 41 | } | ||
| 42 | 41 | ||
| 43 | void mutex_bucket_unlock( int bucket, int delta_torrentcount ) { | 42 | void mutex_bucket_unlock(int bucket, int delta_torrentcount) { |
| 44 | pthread_mutex_unlock(bucket_mutex + bucket); | 43 | pthread_mutex_unlock(bucket_mutex + bucket); |
| 45 | g_torrent_count += delta_torrentcount; | 44 | g_torrent_count += delta_torrentcount; |
| 46 | } | 45 | } |
| 47 | 46 | ||
| 48 | void mutex_bucket_unlock_by_hash( ot_hash hash, int delta_torrentcount ) { | 47 | void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount) { |
| 49 | mutex_bucket_unlock( uint32_read_big( (char*)hash ) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount ); | 48 | mutex_bucket_unlock(uint32_read_big((char *)hash) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount); |
| 50 | } | 49 | } |
| 51 | 50 | ||
| 52 | size_t mutex_get_torrent_count( ) { | 51 | size_t mutex_get_torrent_count() { return g_torrent_count; } |
| 53 | return g_torrent_count; | ||
| 54 | } | ||
| 55 | 52 | ||
| 56 | /* TaskQueue Magic */ | 53 | /* TaskQueue Magic */ |
| 57 | 54 | ||
| @@ -64,16 +61,16 @@ struct ot_task { | |||
| 64 | struct ot_task *next; | 61 | struct ot_task *next; |
| 65 | }; | 62 | }; |
| 66 | 63 | ||
| 67 | static ot_taskid next_free_taskid = 1; | 64 | static ot_taskid next_free_taskid = 1; |
| 68 | static struct ot_task *tasklist; | 65 | static struct ot_task *tasklist; |
| 69 | static pthread_mutex_t tasklist_mutex; | 66 | static pthread_mutex_t tasklist_mutex; |
| 70 | static pthread_cond_t tasklist_being_filled; | 67 | static pthread_cond_t tasklist_being_filled; |
| 71 | 68 | ||
| 72 | int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) { | 69 | int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype) { |
| 73 | struct ot_task ** tmptask, * task; | 70 | struct ot_task **tmptask, *task; |
| 74 | 71 | ||
| 75 | task = malloc(sizeof( struct ot_task)); | 72 | task = malloc(sizeof(struct ot_task)); |
| 76 | if( !task ) | 73 | if (!task) |
| 77 | return -1; | 74 | return -1; |
| 78 | 75 | ||
| 79 | task->taskid = 0; | 76 | task->taskid = 0; |
| @@ -84,98 +81,98 @@ int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) { | |||
| 84 | task->next = 0; | 81 | task->next = 0; |
| 85 | 82 | ||
| 86 | /* Want exclusive access to tasklist */ | 83 | /* Want exclusive access to tasklist */ |
| 87 | pthread_mutex_lock( &tasklist_mutex ); | 84 | pthread_mutex_lock(&tasklist_mutex); |
| 88 | 85 | ||
| 89 | /* Skip to end of list */ | 86 | /* Skip to end of list */ |
| 90 | tmptask = &tasklist; | 87 | tmptask = &tasklist; |
| 91 | while( *tmptask ) | 88 | while (*tmptask) |
| 92 | tmptask = &(*tmptask)->next; | 89 | tmptask = &(*tmptask)->next; |
| 93 | *tmptask = task; | 90 | *tmptask = task; |
| 94 | 91 | ||
| 95 | /* Inform waiting workers and release lock */ | 92 | /* Inform waiting workers and release lock */ |
| 96 | pthread_cond_broadcast( &tasklist_being_filled ); | 93 | pthread_cond_broadcast(&tasklist_being_filled); |
| 97 | pthread_mutex_unlock( &tasklist_mutex ); | 94 | pthread_mutex_unlock(&tasklist_mutex); |
| 98 | return 0; | 95 | return 0; |
| 99 | } | 96 | } |
| 100 | 97 | ||
| 101 | void mutex_workqueue_canceltask( int64 sock ) { | 98 | void mutex_workqueue_canceltask(int64 sock) { |
| 102 | struct ot_task ** task; | 99 | struct ot_task **task; |
| 103 | 100 | ||
| 104 | /* Want exclusive access to tasklist */ | 101 | /* Want exclusive access to tasklist */ |
| 105 | pthread_mutex_lock( &tasklist_mutex ); | 102 | pthread_mutex_lock(&tasklist_mutex); |
| 106 | 103 | ||
| 107 | for (task = &tasklist; *task; task = &((*task)->next)) | 104 | for (task = &tasklist; *task; task = &((*task)->next)) |
| 108 | if ((*task)->sock == sock) { | 105 | if ((*task)->sock == sock) { |
| 109 | struct iovec *iovec = (*task)->iovec; | 106 | struct iovec *iovec = (*task)->iovec; |
| 110 | struct ot_task *ptask = *task; | 107 | struct ot_task *ptask = *task; |
| 111 | int i; | 108 | int i; |
| 112 | 109 | ||
| 113 | /* Free task's iovec */ | 110 | /* Free task's iovec */ |
| 114 | for( i=0; i<(*task)->iovec_entries; ++i ) | 111 | for (i = 0; i < (*task)->iovec_entries; ++i) |
| 115 | free( iovec[i].iov_base ); | 112 | free(iovec[i].iov_base); |
| 116 | 113 | ||
| 117 | *task = (*task)->next; | 114 | *task = (*task)->next; |
| 118 | free( ptask ); | 115 | free(ptask); |
| 119 | break; | 116 | break; |
| 120 | } | 117 | } |
| 121 | 118 | ||
| 122 | /* Release lock */ | 119 | /* Release lock */ |
| 123 | pthread_mutex_unlock( &tasklist_mutex ); | 120 | pthread_mutex_unlock(&tasklist_mutex); |
| 124 | } | 121 | } |
| 125 | 122 | ||
| 126 | ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype ) { | 123 | ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype) { |
| 127 | struct ot_task * task; | 124 | struct ot_task *task; |
| 128 | ot_taskid taskid = 0; | 125 | ot_taskid taskid = 0; |
| 129 | 126 | ||
| 130 | /* Want exclusive access to tasklist */ | 127 | /* Want exclusive access to tasklist */ |
| 131 | pthread_mutex_lock( &tasklist_mutex ); | 128 | pthread_mutex_lock(&tasklist_mutex); |
| 132 | 129 | ||
| 133 | while( !taskid ) { | 130 | while (!taskid) { |
| 134 | /* Skip to the first unassigned task this worker wants to do */ | 131 | /* Skip to the first unassigned task this worker wants to do */ |
| 135 | for (task = tasklist; task; task = task->next) | 132 | for (task = tasklist; task; task = task->next) |
| 136 | if (!task->taskid && ( TASK_CLASS_MASK & task->tasktype ) == *tasktype) { | 133 | if (!task->taskid && (TASK_CLASS_MASK & task->tasktype) == *tasktype) { |
| 137 | /* If we found an outstanding task, assign a taskid to it | 134 | /* If we found an outstanding task, assign a taskid to it |
| 138 | and leave the loop */ | 135 | and leave the loop */ |
| 139 | task->taskid = taskid = ++next_free_taskid; | 136 | task->taskid = taskid = ++next_free_taskid; |
| 140 | *tasktype = task->tasktype; | 137 | *tasktype = task->tasktype; |
| 141 | break; | 138 | break; |
| 142 | } | 139 | } |
| 143 | 140 | ||
| 144 | /* Wait until the next task is being fed */ | 141 | /* Wait until the next task is being fed */ |
| 145 | if (!taskid) | 142 | if (!taskid) |
| 146 | pthread_cond_wait( &tasklist_being_filled, &tasklist_mutex ); | 143 | pthread_cond_wait(&tasklist_being_filled, &tasklist_mutex); |
| 147 | } | 144 | } |
| 148 | 145 | ||
| 149 | /* Release lock */ | 146 | /* Release lock */ |
| 150 | pthread_mutex_unlock( &tasklist_mutex ); | 147 | pthread_mutex_unlock(&tasklist_mutex); |
| 151 | 148 | ||
| 152 | return taskid; | 149 | return taskid; |
| 153 | } | 150 | } |
| 154 | 151 | ||
| 155 | void mutex_workqueue_pushsuccess( ot_taskid taskid ) { | 152 | void mutex_workqueue_pushsuccess(ot_taskid taskid) { |
| 156 | struct ot_task ** task; | 153 | struct ot_task **task; |
| 157 | 154 | ||
| 158 | /* Want exclusive access to tasklist */ | 155 | /* Want exclusive access to tasklist */ |
| 159 | pthread_mutex_lock( &tasklist_mutex ); | 156 | pthread_mutex_lock(&tasklist_mutex); |
| 160 | 157 | ||
| 161 | for (task = &tasklist; *task; task = &((*task)->next)) | 158 | for (task = &tasklist; *task; task = &((*task)->next)) |
| 162 | if ((*task)->taskid == taskid) { | 159 | if ((*task)->taskid == taskid) { |
| 163 | struct ot_task *ptask = *task; | 160 | struct ot_task *ptask = *task; |
| 164 | *task = (*task)->next; | 161 | *task = (*task)->next; |
| 165 | free( ptask ); | 162 | free(ptask); |
| 166 | break; | 163 | break; |
| 167 | } | 164 | } |
| 168 | 165 | ||
| 169 | /* Release lock */ | 166 | /* Release lock */ |
| 170 | pthread_mutex_unlock( &tasklist_mutex ); | 167 | pthread_mutex_unlock(&tasklist_mutex); |
| 171 | } | 168 | } |
| 172 | 169 | ||
| 173 | int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovec ) { | 170 | int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovec) { |
| 174 | struct ot_task * task; | 171 | struct ot_task *task; |
| 175 | const char byte = 'o'; | 172 | const char byte = 'o'; |
| 176 | 173 | ||
| 177 | /* Want exclusive access to tasklist */ | 174 | /* Want exclusive access to tasklist */ |
| 178 | pthread_mutex_lock( &tasklist_mutex ); | 175 | pthread_mutex_lock(&tasklist_mutex); |
| 179 | 176 | ||
| 180 | for (task = tasklist; task; task = task->next) | 177 | for (task = tasklist; task; task = task->next) |
| 181 | if (task->taskid == taskid) { | 178 | if (task->taskid == taskid) { |
| @@ -186,55 +183,90 @@ int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iove | |||
| 186 | } | 183 | } |
| 187 | 184 | ||
| 188 | /* Release lock */ | 185 | /* Release lock */ |
| 189 | pthread_mutex_unlock( &tasklist_mutex ); | 186 | pthread_mutex_unlock(&tasklist_mutex); |
| 190 | 187 | ||
| 191 | io_trywrite( g_self_pipe[1], &byte, 1 ); | 188 | io_trywrite(g_self_pipe[1], &byte, 1); |
| 192 | 189 | ||
| 193 | /* Indicate whether the worker has to throw away results */ | 190 | /* Indicate whether the worker has to throw away results */ |
| 194 | return task ? 0 : -1; | 191 | return task ? 0 : -1; |
| 195 | } | 192 | } |
| 196 | 193 | ||
| 197 | int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec ) { | 194 | int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec) { |
| 198 | struct ot_task ** task; | 195 | struct ot_task *task; |
| 199 | int64 sock = -1; | 196 | const char byte = 'o'; |
| 200 | 197 | ||
| 201 | /* Want exclusive access to tasklist */ | 198 | /* Want exclusive access to tasklist */ |
| 202 | pthread_mutex_lock( &tasklist_mutex ); | 199 | pthread_mutex_lock(&tasklist_mutex); |
| 203 | 200 | ||
| 204 | for (task = &tasklist; *task; task = &((*task)->next)) | 201 | for (task = tasklist; task; task = task->next) |
| 205 | if ((*task)->tasktype == TASK_DONE) { | 202 | if (task->taskid == taskid) { |
| 206 | struct ot_task *ptask = *task; | 203 | if (iovec) { |
| 204 | if (iovec_append(&task->iovec_entries, &task->iovec, iovec)) | ||
| 205 | task->tasktype = TASK_DONE_PARTIAL; | ||
| 206 | else | ||
| 207 | task = NULL; | ||
| 208 | } else | ||
| 209 | task->tasktype = TASK_DONE; | ||
| 210 | break; | ||
| 211 | } | ||
| 207 | 212 | ||
| 208 | *iovec_entries = (*task)->iovec_entries; | 213 | /* Release lock */ |
| 209 | *iovec = (*task)->iovec; | 214 | pthread_mutex_unlock(&tasklist_mutex); |
| 210 | sock = (*task)->sock; | ||
| 211 | 215 | ||
| 212 | *task = (*task)->next; | 216 | io_trywrite(g_self_pipe[1], &byte, 1); |
| 213 | free( ptask ); | 217 | |
| 218 | /* Indicate whether the worker has to throw away results */ | ||
| 219 | return task ? 0 : -1; | ||
| 220 | } | ||
| 221 | |||
| 222 | int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovec, int *is_partial) { | ||
| 223 | struct ot_task **task; | ||
| 224 | int64 sock = -1; | ||
| 225 | |||
| 226 | *is_partial = 0; | ||
| 227 | |||
| 228 | /* Want exclusive access to tasklist */ | ||
| 229 | pthread_mutex_lock(&tasklist_mutex); | ||
| 230 | |||
| 231 | for (task = &tasklist; *task; task = &((*task)->next)) | ||
| 232 | if (((*task)->tasktype & TASK_CLASS_MASK) == TASK_DONE) { | ||
| 233 | struct ot_task *ptask = *task; | ||
| 234 | *iovec_entries = ptask->iovec_entries; | ||
| 235 | *iovec = ptask->iovec; | ||
| 236 | sock = ptask->sock; | ||
| 237 | |||
| 238 | if ((*task)->tasktype == TASK_DONE) { | ||
| 239 | *task = ptask->next; | ||
| 240 | free(ptask); | ||
| 241 | } else { | ||
| 242 | ptask->iovec_entries = 0; | ||
| 243 | ptask->iovec = NULL; | ||
| 244 | *is_partial = 1; | ||
| 245 | /* Prevent task from showing up immediately again unless new data was added */ | ||
| 246 | (*task)->tasktype = TASK_FULLSCRAPE; | ||
| 247 | } | ||
| 214 | break; | 248 | break; |
| 215 | } | 249 | } |
| 216 | 250 | ||
| 217 | /* Release lock */ | 251 | /* Release lock */ |
| 218 | pthread_mutex_unlock( &tasklist_mutex ); | 252 | pthread_mutex_unlock(&tasklist_mutex); |
| 219 | return sock; | 253 | return sock; |
| 220 | } | 254 | } |
| 221 | 255 | ||
| 222 | void mutex_init( ) { | 256 | void mutex_init() { |
| 223 | int i; | 257 | int i; |
| 224 | pthread_mutex_init(&tasklist_mutex, NULL); | 258 | pthread_mutex_init(&tasklist_mutex, NULL); |
| 225 | pthread_cond_init (&tasklist_being_filled, NULL); | 259 | pthread_cond_init(&tasklist_being_filled, NULL); |
| 226 | for (i=0; i < OT_BUCKET_COUNT; ++i) | 260 | for (i = 0; i < OT_BUCKET_COUNT; ++i) |
| 227 | pthread_mutex_init(bucket_mutex + i, NULL); | 261 | pthread_mutex_init(bucket_mutex + i, NULL); |
| 228 | byte_zero( all_torrents, sizeof( all_torrents ) ); | 262 | byte_zero(all_torrents, sizeof(all_torrents)); |
| 229 | } | 263 | } |
| 230 | 264 | ||
| 231 | void mutex_deinit( ) { | 265 | void mutex_deinit() { |
| 232 | int i; | 266 | int i; |
| 233 | for (i=0; i < OT_BUCKET_COUNT; ++i) | 267 | for (i = 0; i < OT_BUCKET_COUNT; ++i) |
| 234 | pthread_mutex_destroy(bucket_mutex + i); | 268 | pthread_mutex_destroy(bucket_mutex + i); |
| 235 | pthread_mutex_destroy(&tasklist_mutex); | 269 | pthread_mutex_destroy(&tasklist_mutex); |
| 236 | pthread_cond_destroy(&tasklist_being_filled); | 270 | pthread_cond_destroy(&tasklist_being_filled); |
| 237 | byte_zero( all_torrents, sizeof( all_torrents ) ); | 271 | byte_zero(all_torrents, sizeof(all_torrents)); |
| 238 | } | 272 | } |
| 239 | |||
| 240 | const char *g_version_mutex_c = "$Source$: $Revision$\n"; | ||
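The new mutex_workqueue_pushchunked() / popresult() pair lets a worker stream a large result instead of buffering it whole: every successfully pushed chunk marks the task TASK_DONE_PARTIAL so the main thread can pick it up early, and a final push with a NULL iovec marks it TASK_DONE. A sketch of the producer side under those semantics; stream_result, the loop body and the chunk size are illustrative, not from the tree:

    #include <stdlib.h>
    #include <sys/uio.h>
    #include "ot_mutex.h"

    static void stream_result(ot_taskid taskid, int chunk_count) {
      int i;

      for (i = 0; i < chunk_count; ++i) {
        struct iovec chunk;

        chunk.iov_len  = 64 * 1024;
        chunk.iov_base = malloc(chunk.iov_len);
        if (!chunk.iov_base)
          return;
        /* ... serialize the next slice into chunk.iov_base, shrink chunk.iov_len ... */

        if (mutex_workqueue_pushchunked(taskid, &chunk)) {
          free(chunk.iov_base);          /* task was cancelled or append failed */
          return;
        }                                /* on success the queue owns the memory */
      }
      mutex_workqueue_pushchunked(taskid, NULL);
    }

On the consuming side, popresult() hands the accumulated iovec over, sets *is_partial, and resets a partial task to TASK_FULLSCRAPE so it only reappears once the worker has pushed more data.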
| @@ -7,69 +7,74 @@ | |||
| 7 | #define OT_MUTEX_H__ | 7 | #define OT_MUTEX_H__ |
| 8 | 8 | ||
| 9 | #include <sys/uio.h> | 9 | #include <sys/uio.h> |
| 10 | #include "trackerlogic.h" | ||
| 10 | 11 | ||
| 11 | void mutex_init( void ); | 12 | void mutex_init(void); |
| 12 | void mutex_deinit( void ); | 13 | void mutex_deinit(void); |
| 13 | 14 | ||
| 14 | ot_vector *mutex_bucket_lock( int bucket ); | 15 | ot_vector *mutex_bucket_lock(int bucket); |
| 15 | ot_vector *mutex_bucket_lock_by_hash( ot_hash hash ); | 16 | ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash); |
| 16 | 17 | ||
| 17 | void mutex_bucket_unlock( int bucket, int delta_torrentcount ); | 18 | void mutex_bucket_unlock(int bucket, int delta_torrentcount); |
| 18 | void mutex_bucket_unlock_by_hash( ot_hash hash, int delta_torrentcount ); | 19 | void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount); |
| 19 | 20 | ||
| 20 | size_t mutex_get_torrent_count(void); | 21 | size_t mutex_get_torrent_count(void); |
| 21 | 22 | ||
| 22 | typedef enum { | 23 | typedef enum { |
| 23 | TASK_STATS_CONNS = 0x0001, | 24 | TASK_STATS_CONNS = 0x0001, |
| 24 | TASK_STATS_TCP = 0x0002, | 25 | TASK_STATS_TCP = 0x0002, |
| 25 | TASK_STATS_UDP = 0x0003, | 26 | TASK_STATS_UDP = 0x0003, |
| 26 | TASK_STATS_SCRAPE = 0x0004, | 27 | TASK_STATS_SCRAPE = 0x0004, |
| 27 | TASK_STATS_FULLSCRAPE = 0x0005, | 28 | TASK_STATS_FULLSCRAPE = 0x0005, |
| 28 | TASK_STATS_TPB = 0x0006, | 29 | TASK_STATS_TPB = 0x0006, |
| 29 | TASK_STATS_HTTPERRORS = 0x0007, | 30 | TASK_STATS_HTTPERRORS = 0x0007, |
| 30 | TASK_STATS_VERSION = 0x0008, | 31 | TASK_STATS_VERSION = 0x0008, |
| 31 | TASK_STATS_BUSY_NETWORKS = 0x0009, | 32 | TASK_STATS_BUSY_NETWORKS = 0x0009, |
| 32 | TASK_STATS_RENEW = 0x000a, | 33 | TASK_STATS_RENEW = 0x000a, |
| 33 | TASK_STATS_SYNCS = 0x000b, | 34 | TASK_STATS_SYNCS = 0x000b, |
| 34 | TASK_STATS_COMPLETED = 0x000c, | 35 | TASK_STATS_COMPLETED = 0x000c, |
| 35 | TASK_STATS_NUMWANTS = 0x000d, | 36 | TASK_STATS_NUMWANTS = 0x000d, |
| 36 | 37 | ||
| 37 | TASK_STATS = 0x0100, /* Mask */ | 38 | TASK_STATS = 0x0100, /* Mask */ |
| 38 | TASK_STATS_TORRENTS = 0x0101, | 39 | TASK_STATS_TORRENTS = 0x0101, |
| 39 | TASK_STATS_PEERS = 0x0102, | 40 | TASK_STATS_PEERS = 0x0102, |
| 40 | TASK_STATS_SLASH24S = 0x0103, | 41 | TASK_STATS_SLASH24S = 0x0103, |
| 41 | TASK_STATS_TOP10 = 0x0104, | 42 | TASK_STATS_TOP10 = 0x0104, |
| 42 | TASK_STATS_TOP100 = 0x0105, | 43 | TASK_STATS_TOP100 = 0x0105, |
| 43 | TASK_STATS_EVERYTHING = 0x0106, | 44 | TASK_STATS_EVERYTHING = 0x0106, |
| 44 | TASK_STATS_FULLLOG = 0x0107, | 45 | TASK_STATS_FULLLOG = 0x0107, |
| 45 | TASK_STATS_WOODPECKERS = 0x0108, | 46 | TASK_STATS_WOODPECKERS = 0x0108, |
| 46 | 47 | ||
| 47 | TASK_FULLSCRAPE = 0x0200, /* Default mode */ | 48 | TASK_FULLSCRAPE = 0x0200, /* Default mode */ |
| 48 | TASK_FULLSCRAPE_TPB_BINARY = 0x0201, | 49 | TASK_FULLSCRAPE_TPB_BINARY = 0x0201, |
| 49 | TASK_FULLSCRAPE_TPB_ASCII = 0x0202, | 50 | TASK_FULLSCRAPE_TPB_ASCII = 0x0202, |
| 50 | TASK_FULLSCRAPE_TPB_ASCII_PLUS = 0x0203, | 51 | TASK_FULLSCRAPE_TPB_ASCII_PLUS = 0x0203, |
| 51 | TASK_FULLSCRAPE_TPB_URLENCODED = 0x0204, | 52 | TASK_FULLSCRAPE_TPB_URLENCODED = 0x0204, |
| 52 | TASK_FULLSCRAPE_TRACKERSTATE = 0x0205, | 53 | TASK_FULLSCRAPE_TRACKERSTATE = 0x0205, |
| 53 | 54 | ||
| 54 | TASK_DMEM = 0x0300, | 55 | TASK_DMEM = 0x0300, |
| 55 | 56 | ||
| 56 | TASK_DONE = 0x0f00, | 57 | TASK_DONE = 0x0f00, |
| 57 | 58 | TASK_DONE_PARTIAL = 0x0f01, | |
| 58 | TASK_FLAG_GZIP = 0x1000, | 59 | |
| 59 | TASK_FLAG_BZIP2 = 0x2000, | 60 | TASK_FLAG_GZIP = 0x1000, |
| 60 | 61 | TASK_FLAG_BZIP2 = 0x2000, | |
| 61 | TASK_TASK_MASK = 0x0fff, | 62 | TASK_FLAG_ZSTD = 0x4000, |
| 62 | TASK_CLASS_MASK = 0x0f00, | 63 | TASK_FLAG_CHUNKED = 0x8000, |
| 63 | TASK_FLAGS_MASK = 0xf000 | 64 | |
| 65 | TASK_TASK_MASK = 0x0fff, | ||
| 66 | TASK_CLASS_MASK = 0x0f00, | ||
| 67 | TASK_FLAGS_MASK = 0xf000 | ||
| 64 | } ot_tasktype; | 68 | } ot_tasktype; |
| 65 | 69 | ||
| 66 | typedef unsigned long ot_taskid; | 70 | typedef unsigned long ot_taskid; |
| 67 | 71 | ||
| 68 | int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ); | 72 | int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype); |
| 69 | void mutex_workqueue_canceltask( int64 sock ); | 73 | void mutex_workqueue_canceltask(int64 sock); |
| 70 | void mutex_workqueue_pushsuccess( ot_taskid taskid ); | 74 | void mutex_workqueue_pushsuccess(ot_taskid taskid); |
| 71 | ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype ); | 75 | ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype); |
| 72 | int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovector ); | 76 | int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovector); |
| 73 | int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovector ); | 77 | int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec); |
| 78 | int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovector, int *is_partial); | ||
| 74 | 79 | ||
| 75 | #endif | 80 | #endif |
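The enum doubles as a small bit layout: the low bits name the concrete task, TASK_CLASS_MASK selects its class and TASK_FLAGS_MASK carries orthogonal options such as compression and chunked transfer. An illustrative decomposition (classify is a hypothetical helper):

    #include "ot_mutex.h"

    static void classify(ot_tasktype t) {
      ot_tasktype base  = t & TASK_TASK_MASK;  /* e.g. TASK_FULLSCRAPE_TPB_ASCII          */
      ot_tasktype cls   = t & TASK_CLASS_MASK; /* e.g. TASK_FULLSCRAPE                    */
      ot_tasktype flags = t & TASK_FLAGS_MASK; /* e.g. TASK_FLAG_GZIP | TASK_FLAG_CHUNKED */

      (void)base;
      (void)cls;
      (void)flags;
    }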
diff --git a/ot_rijndael.c b/ot_rijndael.c index f468e2f..3f36bde 100644 --- a/ot_rijndael.c +++ b/ot_rijndael.c | |||
| @@ -486,5 +486,3 @@ void rijndaelEncrypt128(const uint32_t rk[44], const uint8_t pt[16], uint8_t ct[ | |||
| 486 | rk[43]; | 486 | rk[43]; |
| 487 | PUTU32(ct + 12, s3); | 487 | PUTU32(ct + 12, s3); |
| 488 | } | 488 | } |
| 489 | |||
| 490 | const char *g_version_rijndael_c = "$Source$: $Revision$\n"; | ||
| @@ -4,16 +4,16 @@ | |||
| 4 | $id$ */ | 4 | $id$ */ |
| 5 | 5 | ||
| 6 | /* System */ | 6 | /* System */ |
| 7 | #include <stdlib.h> | ||
| 8 | #include <arpa/inet.h> | 7 | #include <arpa/inet.h> |
| 9 | #include <sys/types.h> | 8 | #include <inttypes.h> |
| 10 | #include <sys/uio.h> | 9 | #include <pthread.h> |
| 11 | #include <sys/mman.h> | ||
| 12 | #include <stdio.h> | 10 | #include <stdio.h> |
| 11 | #include <stdlib.h> | ||
| 13 | #include <string.h> | 12 | #include <string.h> |
| 14 | #include <pthread.h> | 13 | #include <sys/mman.h> |
| 14 | #include <sys/types.h> | ||
| 15 | #include <sys/uio.h> | ||
| 15 | #include <unistd.h> | 16 | #include <unistd.h> |
| 16 | #include <inttypes.h> | ||
| 17 | #ifdef WANT_SYSLOGS | 17 | #ifdef WANT_SYSLOGS |
| 18 | #include <syslog.h> | 18 | #include <syslog.h> |
| 19 | #endif | 19 | #endif |
| @@ -25,61 +25,63 @@ | |||
| 25 | #include "ip6.h" | 25 | #include "ip6.h" |
| 26 | 26 | ||
| 27 | /* Opentracker */ | 27 | /* Opentracker */ |
| 28 | #include "trackerlogic.h" | 28 | #include "ot_accesslist.h" |
| 29 | #include "ot_mutex.h" | ||
| 30 | #include "ot_iovec.h" | 29 | #include "ot_iovec.h" |
| 30 | #include "ot_mutex.h" | ||
| 31 | #include "ot_stats.h" | 31 | #include "ot_stats.h" |
| 32 | #include "ot_accesslist.h" | 32 | #include "trackerlogic.h" |
| 33 | 33 | ||
| 34 | #ifndef NO_FULLSCRAPE_LOGGING | 34 | #ifndef NO_FULLSCRAPE_LOGGING |
| 35 | #define LOG_TO_STDERR( ... ) fprintf( stderr, __VA_ARGS__ ) | 35 | #define LOG_TO_STDERR(...) fprintf(stderr, __VA_ARGS__) |
| 36 | #else | 36 | #else |
| 37 | #define LOG_TO_STDERR( ... ) | 37 | #define LOG_TO_STDERR(...) |
| 38 | #endif | 38 | #endif |
| 39 | 39 | ||
| 40 | /* Forward declaration */ | 40 | /* Forward declaration */ |
| 41 | static void stats_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ); | 41 | static void stats_make(int *iovec_entries, struct iovec **iovector, ot_tasktype mode); |
| 42 | #define OT_STATS_TMPSIZE 8192 | 42 | #define OT_STATS_TMPSIZE 8192 |
| 43 | 43 | ||
| 44 | /* Clumsy counters... to be rethought */ | 44 | /* Clumsy counters... to be rethought */ |
| 45 | static unsigned long long ot_overall_tcp_connections = 0; | 45 | static unsigned long long ot_overall_tcp_connections; |
| 46 | static unsigned long long ot_overall_udp_connections = 0; | 46 | static unsigned long long ot_overall_udp_connections; |
| 47 | static unsigned long long ot_overall_tcp_successfulannounces = 0; | 47 | static unsigned long long ot_overall_tcp_successfulannounces; |
| 48 | static unsigned long long ot_overall_udp_successfulannounces = 0; | 48 | static unsigned long long ot_overall_udp_successfulannounces; |
| 49 | static unsigned long long ot_overall_tcp_successfulscrapes = 0; | 49 | static unsigned long long ot_overall_tcp_successfulscrapes; |
| 50 | static unsigned long long ot_overall_udp_successfulscrapes = 0; | 50 | static unsigned long long ot_overall_udp_successfulscrapes; |
| 51 | static unsigned long long ot_overall_udp_connectionidmissmatches = 0; | 51 | static unsigned long long ot_overall_udp_connectionidmissmatches; |
| 52 | static unsigned long long ot_overall_tcp_connects = 0; | 52 | static unsigned long long ot_overall_tcp_connects; |
| 53 | static unsigned long long ot_overall_udp_connects = 0; | 53 | static unsigned long long ot_overall_udp_connects; |
| 54 | static unsigned long long ot_overall_completed = 0; | 54 | static unsigned long long ot_overall_completed; |
| 55 | static unsigned long long ot_full_scrape_count = 0; | 55 | static unsigned long long ot_full_scrape_count; |
| 56 | static unsigned long long ot_full_scrape_request_count = 0; | 56 | static unsigned long long ot_full_scrape_request_count; |
| 57 | static unsigned long long ot_full_scrape_size = 0; | 57 | static unsigned long long ot_full_scrape_size; |
| 58 | static unsigned long long ot_failed_request_counts[CODE_HTTPERROR_COUNT]; | 58 | static unsigned long long ot_failed_request_counts[CODE_HTTPERROR_COUNT]; |
| 59 | static char * ot_failed_request_names[] = { "302 Redirect", "400 Parse Error", "400 Invalid Parameter", "400 Invalid Parameter (compact=0)", "400 Not Modest", "402 Payment Required", "403 Access Denied", "404 Not found", "500 Internal Server Error" }; | 59 | static char *ot_failed_request_names[] = { |
| 60 | "302 Redirect", "400 Parse Error", "400 Invalid Parameter", "400 Invalid Parameter (compact=0)", "400 Not Modest", | ||
| 61 | "402 Payment Required", "403 Access Denied", "404 Not found", "500 Internal Server Error"}; | ||
| 60 | static unsigned long long ot_renewed[OT_PEER_TIMEOUT]; | 62 | static unsigned long long ot_renewed[OT_PEER_TIMEOUT]; |
| 61 | static unsigned long long ot_overall_sync_count; | 63 | static unsigned long long ot_overall_sync_count; |
| 62 | static unsigned long long ot_overall_stall_count; | 64 | static unsigned long long ot_overall_stall_count; |
| 63 | 65 | ||
| 64 | static time_t ot_start_time; | 66 | static time_t ot_start_time; |
| 65 | 67 | ||
| 66 | #define STATS_NETWORK_NODE_BITWIDTH 4 | 68 | #define STATS_NETWORK_NODE_BITWIDTH 4 |
| 67 | #define STATS_NETWORK_NODE_COUNT (1<<STATS_NETWORK_NODE_BITWIDTH) | 69 | #define STATS_NETWORK_NODE_COUNT (1 << STATS_NETWORK_NODE_BITWIDTH) |
| 68 | 70 | ||
| 69 | #define __BYTE(P,D) (((uint8_t*)P)[D/8]) | 71 | #define __BYTE(P, D) (((uint8_t *)P)[D / 8]) |
| 70 | #define __MSK (STATS_NETWORK_NODE_COUNT-1) | 72 | #define __MSK (STATS_NETWORK_NODE_COUNT - 1) |
| 71 | #define __SHFT(D) ((D^STATS_NETWORK_NODE_BITWIDTH)&STATS_NETWORK_NODE_BITWIDTH) | 73 | #define __SHFT(D) ((D ^ STATS_NETWORK_NODE_BITWIDTH) & STATS_NETWORK_NODE_BITWIDTH) |
| 72 | 74 | ||
| 73 | #define __LDR(P,D) ((__BYTE((P),(D))>>__SHFT((D)))&__MSK) | 75 | #define __LDR(P, D) ((__BYTE((P), (D)) >> __SHFT((D))) & __MSK) |
| 74 | #define __STR(P,D,V) __BYTE((P),(D))=(__BYTE((P),(D))&~(__MSK<<__SHFT((D))))|((V)<<__SHFT((D))) | 76 | #define __STR(P, D, V) __BYTE((P), (D)) = (__BYTE((P), (D)) & ~(__MSK << __SHFT((D)))) | ((V) << __SHFT((D))) |
| 75 | 77 | ||
| 76 | #ifdef WANT_V6 | 78 | #if 0 |
| 77 | #define STATS_NETWORK_NODE_MAXDEPTH (68-STATS_NETWORK_NODE_BITWIDTH) | 79 | // XXX |
| 78 | #define STATS_NETWORK_NODE_LIMIT (48-STATS_NETWORK_NODE_BITWIDTH) | 80 | #define STATS_NETWORK_NODE_MAXDEPTH (68 - STATS_NETWORK_NODE_BITWIDTH) |
| 79 | #else | 81 | #define STATS_NETWORK_NODE_LIMIT (48 - STATS_NETWORK_NODE_BITWIDTH) |
| 80 | #define STATS_NETWORK_NODE_MAXDEPTH (28-STATS_NETWORK_NODE_BITWIDTH) | ||
| 81 | #define STATS_NETWORK_NODE_LIMIT (24-STATS_NETWORK_NODE_BITWIDTH) | ||
| 82 | #endif | 82 | #endif |
| 83 | #define STATS_NETWORK_NODE_MAXDEPTH (28 - STATS_NETWORK_NODE_BITWIDTH) | ||
| 84 | #define STATS_NETWORK_NODE_LIMIT (24 - STATS_NETWORK_NODE_BITWIDTH) | ||
| 83 | 85 | ||
| 84 | typedef union stats_network_node stats_network_node; | 86 | typedef union stats_network_node stats_network_node; |
| 85 | union stats_network_node { | 87 | union stats_network_node { |
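The __LDR / __STR macros above walk an address one nibble at a time (STATS_NETWORK_NODE_BITWIDTH bits per trie level), which is how the per-network counter trie is indexed. A tiny standalone illustration that restates those definitions verbatim so it compiles on its own; the example address is arbitrary:

    #include <stdint.h>
    #include <stdio.h>

    #define STATS_NETWORK_NODE_BITWIDTH 4
    #define STATS_NETWORK_NODE_COUNT    (1 << STATS_NETWORK_NODE_BITWIDTH)
    #define __BYTE(P, D) (((uint8_t *)P)[D / 8])
    #define __MSK        (STATS_NETWORK_NODE_COUNT - 1)
    #define __SHFT(D)    ((D ^ STATS_NETWORK_NODE_BITWIDTH) & STATS_NETWORK_NODE_BITWIDTH)
    #define __LDR(P, D)  ((__BYTE((P), (D)) >> __SHFT((D))) & __MSK)

    int main(void) {
      uint8_t ip[4] = {0x0a, 0x14, 0x1e, 0x28}; /* 10.20.30.40 */

      /* depth advances 4 bits per trie level; each level consumes one nibble */
      printf("%x %x %x %x\n", __LDR(ip, 0), __LDR(ip, 4), __LDR(ip, 8), __LDR(ip, 12));
      /* prints "0 a 1 4": high and low nibble of the first octet, then of the second */
      return 0;
    }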
| @@ -91,120 +93,125 @@ union stats_network_node { | |||
| 91 | static stats_network_node *stats_network_counters_root; | 93 | static stats_network_node *stats_network_counters_root; |
| 92 | #endif | 94 | #endif |
| 93 | 95 | ||
| 94 | static int stat_increase_network_count( stats_network_node **pnode, int depth, uintptr_t ip ) { | 96 | static int stat_increase_network_count(stats_network_node **pnode, int depth, uintptr_t ip) { |
| 95 | int foo = __LDR(ip,depth); | 97 | int foo = __LDR(ip, depth); |
| 96 | stats_network_node *node; | 98 | stats_network_node *node; |
| 97 | 99 | ||
| 98 | if( !*pnode ) { | 100 | if (!*pnode) { |
| 99 | *pnode = malloc( sizeof( stats_network_node ) ); | 101 | *pnode = malloc(sizeof(stats_network_node)); |
| 100 | if( !*pnode ) | 102 | if (!*pnode) |
| 101 | return -1; | 103 | return -1; |
| 102 | memset( *pnode, 0, sizeof( stats_network_node ) ); | 104 | memset(*pnode, 0, sizeof(stats_network_node)); |
| 103 | } | 105 | } |
| 104 | node = *pnode; | 106 | node = *pnode; |
| 105 | 107 | ||
| 106 | if( depth < STATS_NETWORK_NODE_MAXDEPTH ) | 108 | if (depth < STATS_NETWORK_NODE_MAXDEPTH) |
| 107 | return stat_increase_network_count( node->children + foo, depth+STATS_NETWORK_NODE_BITWIDTH, ip ); | 109 | return stat_increase_network_count(node->children + foo, depth + STATS_NETWORK_NODE_BITWIDTH, ip); |
| 108 | 110 | ||
| 109 | node->counters[ foo ]++; | 111 | node->counters[foo]++; |
| 110 | return 0; | 112 | return 0; |
| 111 | } | 113 | } |
| 112 | 114 | ||
| 113 | static int stats_shift_down_network_count( stats_network_node **node, int depth, int shift ) { | 115 | static int stats_shift_down_network_count(stats_network_node **node, int depth, int shift) { |
| 114 | int i, rest = 0; | 116 | int i, rest = 0; |
| 115 | 117 | ||
| 116 | if( !*node ) | 118 | if (!*node) |
| 117 | return 0; | 119 | return 0; |
| 118 | 120 | ||
| 119 | for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) | 121 | for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i) |
| 120 | if( depth < STATS_NETWORK_NODE_MAXDEPTH ) | 122 | if (depth < STATS_NETWORK_NODE_MAXDEPTH) |
| 121 | rest += stats_shift_down_network_count( (*node)->children + i, depth+STATS_NETWORK_NODE_BITWIDTH, shift ); | 123 | rest += stats_shift_down_network_count((*node)->children + i, depth + STATS_NETWORK_NODE_BITWIDTH, shift); |
| 122 | else | 124 | else |
| 123 | rest += (*node)->counters[i] >>= shift; | 125 | rest += (*node)->counters[i] >>= shift; |
| 124 | 126 | ||
| 125 | if( !rest ) { | 127 | if (!rest) { |
| 126 | free( *node ); | 128 | free(*node); |
| 127 | *node = NULL; | 129 | *node = NULL; |
| 128 | } | 130 | } |
| 129 | 131 | ||
| 130 | return rest; | 132 | return rest; |
| 131 | } | 133 | } |
| 132 | 134 | ||
| 133 | static size_t stats_get_highscore_networks( stats_network_node *node, int depth, ot_ip6 node_value, size_t *scores, ot_ip6 *networks, int network_count, int limit ) { | 135 | static size_t stats_get_highscore_networks(stats_network_node *node, int depth, ot_ip6 node_value, size_t *scores, ot_ip6 *networks, int network_count, |
| 136 | int limit) { | ||
| 134 | size_t score = 0; | 137 | size_t score = 0; |
| 135 | int i; | 138 | int i; |
| 136 | 139 | ||
| 137 | if( !node ) return 0; | 140 | if (!node) |
| 141 | return 0; | ||
| 138 | 142 | ||
| 139 | if( depth < limit ) { | 143 | if (depth < limit) { |
| 140 | for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) | 144 | for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i) |
| 141 | if( node->children[i] ) { | 145 | if (node->children[i]) { |
| 142 | __STR(node_value,depth,i); | 146 | __STR(node_value, depth, i); |
| 143 | score += stats_get_highscore_networks( node->children[i], depth+STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit ); | 147 | score += stats_get_highscore_networks(node->children[i], depth + STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit); |
| 144 | } | 148 | } |
| 145 | return score; | 149 | return score; |
| 146 | } | 150 | } |
| 147 | 151 | ||
| 148 | if( depth > limit && depth < STATS_NETWORK_NODE_MAXDEPTH ) { | 152 | if (depth > limit && depth < STATS_NETWORK_NODE_MAXDEPTH) { |
| 149 | for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) | 153 | for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i) |
| 150 | if( node->children[i] ) | 154 | if (node->children[i]) |
| 151 | score += stats_get_highscore_networks( node->children[i], depth+STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit ); | 155 | score += stats_get_highscore_networks(node->children[i], depth + STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit); |
| 152 | return score; | 156 | return score; |
| 153 | } | 157 | } |
| 154 | 158 | ||
| 155 | if( depth > limit && depth == STATS_NETWORK_NODE_MAXDEPTH ) { | 159 | if (depth > limit && depth == STATS_NETWORK_NODE_MAXDEPTH) { |
| 156 | for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) | 160 | for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i) |
| 157 | score += node->counters[i]; | 161 | score += node->counters[i]; |
| 158 | return score; | 162 | return score; |
| 159 | } | 163 | } |
| 160 | 164 | ||
| 161 | /* if( depth == limit ) */ | 165 | /* if( depth == limit ) */ |
| 162 | for( i=0; i<STATS_NETWORK_NODE_COUNT; ++i ) { | 166 | for (i = 0; i < STATS_NETWORK_NODE_COUNT; ++i) { |
| 163 | int j=1; | 167 | int j = 1; |
| 164 | size_t node_score; | 168 | size_t node_score; |
| 165 | 169 | ||
| 166 | if( depth == STATS_NETWORK_NODE_MAXDEPTH ) | 170 | if (depth == STATS_NETWORK_NODE_MAXDEPTH) |
| 167 | node_score = node->counters[i]; | 171 | node_score = node->counters[i]; |
| 168 | else | 172 | else |
| 169 | node_score = stats_get_highscore_networks( node->children[i], depth+STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit ); | 173 | node_score = stats_get_highscore_networks(node->children[i], depth + STATS_NETWORK_NODE_BITWIDTH, node_value, scores, networks, network_count, limit); |
| 170 | 174 | ||
| 171 | score += node_score; | 175 | score += node_score; |
| 172 | 176 | ||
| 173 | if( node_score <= scores[0] ) continue; | 177 | if (node_score <= scores[0]) |
| 178 | continue; | ||
| 174 | 179 | ||
| 175 | __STR(node_value,depth,i); | 180 | __STR(node_value, depth, i); |
| 176 | while( j < network_count && node_score > scores[j] ) ++j; | 181 | while (j < network_count && node_score > scores[j]) |
| 182 | ++j; | ||
| 177 | --j; | 183 | --j; |
| 178 | 184 | ||
| 179 | memcpy( scores, scores + 1, j * sizeof( *scores ) ); | 185 | memcpy(scores, scores + 1, j * sizeof(*scores)); |
| 180 | memcpy( networks, networks + 1, j * sizeof( *networks ) ); | 186 | memcpy(networks, networks + 1, j * sizeof(*networks)); |
| 181 | scores[ j ] = node_score; | 187 | scores[j] = node_score; |
| 182 | memcpy( networks + j, node_value, sizeof( *networks ) ); | 188 | memcpy(networks + j, node_value, sizeof(*networks)); |
| 183 | } | 189 | } |
| 184 | 190 | ||
| 185 | return score; | 191 | return score; |
| 186 | } | 192 | } |
| 187 | 193 | ||
| 188 | static size_t stats_return_busy_networks( char * reply, stats_network_node *tree, int amount, int limit ) { | 194 | static size_t stats_return_busy_networks(char *reply, stats_network_node *tree, int amount, int limit) { |
| 189 | ot_ip6 networks[amount]; | 195 | ot_ip6 networks[amount]; |
| 190 | ot_ip6 node_value; | 196 | ot_ip6 node_value; |
| 191 | size_t scores[amount]; | 197 | size_t scores[amount]; |
| 192 | int i; | 198 | int i; |
| 193 | char * r = reply; | 199 | char *r = reply; |
| 194 | 200 | ||
| 195 | memset( scores, 0, sizeof( scores ) ); | 201 | memset(scores, 0, sizeof(scores)); |
| 196 | memset( networks, 0, sizeof( networks ) ); | 202 | memset(networks, 0, sizeof(networks)); |
| 197 | memset( node_value, 0, sizeof( node_value ) ); | 203 | memset(node_value, 0, sizeof(node_value)); |
| 198 | 204 | ||
| 199 | stats_get_highscore_networks( tree, 0, node_value, scores, networks, amount, limit ); | 205 | stats_get_highscore_networks(tree, 0, node_value, scores, networks, amount, limit); |
| 200 | 206 | ||
| 201 | r += sprintf( r, "Networks, limit /%d:\n", limit+STATS_NETWORK_NODE_BITWIDTH ); | 207 | r += sprintf(r, "Networks, limit /%d:\n", limit + STATS_NETWORK_NODE_BITWIDTH); |
| 202 | for( i=amount-1; i>=0; --i) { | 208 | for (i = amount - 1; i >= 0; --i) { |
| 203 | if( scores[i] ) { | 209 | if (scores[i]) { |
| 204 | r += sprintf( r, "%08zd: ", scores[i] ); | 210 | r += sprintf(r, "%08zd: ", scores[i]); |
| 205 | #ifdef WANT_V6 | 211 | // #ifdef WANT_V6 |
| 206 | r += fmt_ip6c( r, networks[i] ); | 212 | r += fmt_ip6c(r, networks[i]); |
| 207 | #else | 213 | #if 0 |
| 214 | // XXX | ||
| 208 | r += fmt_ip4( r, networks[i]); | 215 | r += fmt_ip4( r, networks[i]); |
| 209 | #endif | 216 | #endif |
| 210 | *r++ = '\n'; | 217 | *r++ = '\n'; |
| @@ -215,64 +222,66 @@ static size_t stats_return_busy_networks( char * reply, stats_network_node *tree | |||
| 215 | return r - reply; | 222 | return r - reply; |
| 216 | } | 223 | } |
| 217 | 224 | ||
| 218 | static size_t stats_slash24s_txt( char *reply, size_t amount ) { | 225 | static size_t stats_slash24s_txt(char *reply, size_t amount) { |
| 219 | stats_network_node *slash24s_network_counters_root = NULL; | 226 | stats_network_node *slash24s_network_counters_root = NULL; |
| 220 | char *r=reply; | 227 | char *r = reply; |
| 221 | int bucket; | 228 | int bucket; |
| 222 | size_t i; | 229 | size_t i, peer_size = OT_PEER_SIZE4; |
| 223 | 230 | ||
| 224 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 231 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
| 225 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 232 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
| 226 | for( i=0; i<torrents_list->size; ++i ) { | 233 | for (i = 0; i < torrents_list->size; ++i) { |
| 227 | ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[i] ).peer_list; | 234 | ot_peerlist *peer_list = (((ot_torrent *)(torrents_list->data))[i]).peer_list4; |
| 228 | ot_vector *bucket_list = &peer_list->peers; | 235 | ot_vector *bucket_list = &peer_list->peers; |
| 229 | int num_buckets = 1; | 236 | int num_buckets = 1; |
| 230 | 237 | ||
| 231 | if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { | 238 | if (OT_PEERLIST_HASBUCKETS(peer_list)) { |
| 232 | num_buckets = bucket_list->size; | 239 | num_buckets = bucket_list->size; |
| 233 | bucket_list = (ot_vector *)bucket_list->data; | 240 | bucket_list = (ot_vector *)bucket_list->data; |
| 234 | } | 241 | } |
| 235 | 242 | ||
| 236 | while( num_buckets-- ) { | 243 | while (num_buckets--) { |
| 237 | ot_peer *peers = (ot_peer*)bucket_list->data; | 244 | ot_peer *peers = (ot_peer *)bucket_list->data; |
| 238 | size_t numpeers = bucket_list->size; | 245 | size_t numpeers = bucket_list->size; |
| 239 | while( numpeers-- ) | 246 | while (numpeers--) { |
| 240 | if( stat_increase_network_count( &slash24s_network_counters_root, 0, (uintptr_t)(peers++) ) ) | 247 | if (stat_increase_network_count(&slash24s_network_counters_root, 0, (uintptr_t)(peers))) |
| 241 | goto bailout_unlock; | 248 | goto bailout_unlock; |
| 249 | peers += peer_size; | ||
| 250 | } | ||
| 242 | ++bucket_list; | 251 | ++bucket_list; |
| 243 | } | 252 | } |
| 244 | } | 253 | } |
| 245 | mutex_bucket_unlock( bucket, 0 ); | 254 | mutex_bucket_unlock(bucket, 0); |
| 246 | if( !g_opentracker_running ) | 255 | if (!g_opentracker_running) |
| 247 | goto bailout_error; | 256 | goto bailout_error; |
| 248 | } | 257 | } |
| 249 | 258 | ||
| 250 | /* The tree is built. Now analyze */ | 259 | /* The tree is built. Now analyze */ |
| 251 | r += stats_return_busy_networks( r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_MAXDEPTH ); | 260 | r += stats_return_busy_networks(r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_MAXDEPTH); |
| 252 | r += stats_return_busy_networks( r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_LIMIT ); | 261 | r += stats_return_busy_networks(r, slash24s_network_counters_root, amount, STATS_NETWORK_NODE_LIMIT); |
| 253 | goto success; | 262 | goto success; |
| 254 | 263 | ||
| 255 | bailout_unlock: | 264 | bailout_unlock: |
| 256 | mutex_bucket_unlock( bucket, 0 ); | 265 | mutex_bucket_unlock(bucket, 0); |
| 257 | bailout_error: | 266 | bailout_error: |
| 258 | r = reply; | 267 | r = reply; |
| 259 | success: | 268 | success: |
| 260 | stats_shift_down_network_count( &slash24s_network_counters_root, 0, sizeof(int)*8-1 ); | 269 | stats_shift_down_network_count(&slash24s_network_counters_root, 0, sizeof(int) * 8 - 1); |
| 261 | 270 | ||
| 262 | return r-reply; | 271 | return r - reply; |
| 263 | } | 272 | } |
| 264 | 273 | ||
| 265 | #ifdef WANT_SPOT_WOODPECKER | 274 | #ifdef WANT_SPOT_WOODPECKER |
| 266 | static stats_network_node *stats_woodpeckers_tree; | 275 | static stats_network_node *stats_woodpeckers_tree; |
| 267 | static pthread_mutex_t g_woodpeckers_mutex = PTHREAD_MUTEX_INITIALIZER; | 276 | static pthread_mutex_t g_woodpeckers_mutex = PTHREAD_MUTEX_INITIALIZER; |
| 268 | 277 | ||
| 269 | static size_t stats_return_woodpeckers( char * reply, int amount ) { | 278 | static size_t stats_return_woodpeckers(char *reply, int amount) { |
| 270 | char * r = reply; | 279 | char *r = reply; |
| 271 | 280 | ||
| 272 | pthread_mutex_lock( &g_woodpeckers_mutex ); | 281 | pthread_mutex_lock(&g_woodpeckers_mutex); |
| 273 | r += stats_return_busy_networks( r, stats_woodpeckers_tree, amount, STATS_NETWORK_NODE_MAXDEPTH ); | 282 | r += stats_return_busy_networks(r, stats_woodpeckers_tree, amount, STATS_NETWORK_NODE_MAXDEPTH); |
| 274 | pthread_mutex_unlock( &g_woodpeckers_mutex ); | 283 | pthread_mutex_unlock(&g_woodpeckers_mutex); |
| 275 | return r-reply; | 284 | return r - reply; |
| 276 | } | 285 | } |
| 277 | #endif | 286 | #endif |
| 278 | 287 | ||
| @@ -282,496 +291,495 @@ typedef struct { | |||
| 282 | unsigned long long seed_count; | 291 | unsigned long long seed_count; |
| 283 | } torrent_stats; | 292 | } torrent_stats; |
| 284 | 293 | ||
| 285 | static int torrent_statter( ot_torrent *torrent, uintptr_t data ) { | 294 | static int torrent_statter(ot_torrent *torrent, uintptr_t data) { |
| 286 | torrent_stats *stats = (torrent_stats*)data; | 295 | torrent_stats *stats = (torrent_stats *)data; |
| 287 | stats->torrent_count++; | 296 | stats->torrent_count++; |
| 288 | stats->peer_count += torrent->peer_list->peer_count; | 297 | stats->peer_count += torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; |
| 289 | stats->seed_count += torrent->peer_list->seed_count; | 298 | stats->seed_count += torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; |
| 290 | return 0; | 299 | return 0; |
| 291 | } | 300 | } |
| 292 | 301 | ||
| 293 | /* Converter function from memory to human readable hex strings */ | 302 | /* Converter function from memory to human readable hex strings */ |
| 294 | static char*to_hex(char*d,uint8_t*s){char*m="0123456789ABCDEF";char *t=d;char*e=d+40;while(d<e){*d++=m[*s>>4];*d++=m[*s++&15];}*d=0;return t;} | 303 | static char *to_hex(char *d, uint8_t *s) { |
| 304 | char *m = "0123456789ABCDEF"; | ||
| 305 | char *t = d; | ||
| 306 | char *e = d + 40; | ||
| 307 | while (d < e) { | ||
| 308 | *d++ = m[*s >> 4]; | ||
| 309 | *d++ = m[*s++ & 15]; | ||
| 310 | } | ||
| 311 | *d = 0; | ||
| 312 | return t; | ||
| 313 | } | ||
| 295 | 314 | ||
| 296 | typedef struct { size_t val; ot_hash hash; } ot_record; | 315 | typedef struct { |
| 316 | size_t val; | ||
| 317 | ot_hash hash; | ||
| 318 | } ot_record; | ||
| 297 | 319 | ||
| 298 | /* Fetches stats from tracker */ | 320 | /* Fetches stats from tracker */ |
| 299 | size_t stats_top_txt( char * reply, int amount ) { | 321 | size_t stats_top_txt(char *reply, int amount) { |
| 300 | size_t j; | 322 | size_t j; |
| 301 | ot_record top100s[100], top100c[100]; | 323 | ot_record top100s[100], top100c[100], top100l[100]; |
| 302 | char *r = reply, hex_out[42]; | 324 | char *r = reply, hex_out[42]; |
| 303 | int idx, bucket; | 325 | int idx, bucket; |
| 304 | 326 | ||
| 305 | if( amount > 100 ) | 327 | if (amount > 100) |
| 306 | amount = 100; | 328 | amount = 100; |
| 307 | 329 | ||
| 308 | byte_zero( top100s, sizeof( top100s ) ); | 330 | byte_zero(top100s, sizeof(top100s)); |
| 309 | byte_zero( top100c, sizeof( top100c ) ); | 331 | byte_zero(top100c, sizeof(top100c)); |
| 310 | 332 | byte_zero(top100l, sizeof(top100l)); | |
| 311 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 333 | |
| 312 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 334 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
| 313 | for( j=0; j<torrents_list->size; ++j ) { | 335 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
| 314 | ot_torrent *torrent = (ot_torrent*)(torrents_list->data) + j; | 336 | for (j = 0; j < torrents_list->size; ++j) { |
| 337 | ot_torrent *torrent = (ot_torrent *)(torrents_list->data) + j; | ||
| 338 | size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; | ||
| 339 | size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; | ||
| 340 | size_t leech_count = peer_count - seed_count; | ||
| 341 | idx = amount - 1; | ||
| 342 | while ((idx >= 0) && (peer_count > top100c[idx].val)) | ||
| 343 | --idx; | ||
| 344 | if (idx++ != amount - 1) { | ||
| 345 | memmove(top100c + idx + 1, top100c + idx, (amount - 1 - idx) * sizeof(ot_record)); | ||
| 346 | memcpy(&top100c[idx].hash, &torrent->hash, sizeof(ot_hash)); | ||
| 347 | top100c[idx].val = peer_count; | ||
| 348 | } | ||
| 315 | idx = amount - 1; | 349 | idx = amount - 1; |
| 316 | while( (idx >= 0) && ( torrent->peer_list->peer_count > top100c[idx].val ) ) | 350 | while ((idx >= 0) && (seed_count > top100s[idx].val)) |
| 317 | --idx; | 351 | --idx; |
| 318 | if ( idx++ != amount - 1 ) { | 352 | if (idx++ != amount - 1) { |
| 319 | memmove( top100c + idx + 1, top100c + idx, ( amount - 1 - idx ) * sizeof( ot_record ) ); | 353 | memmove(top100s + idx + 1, top100s + idx, (amount - 1 - idx) * sizeof(ot_record)); |
| 320 | memcpy( &top100c[idx].hash, &torrent->hash, sizeof(ot_hash)); | 354 | memcpy(&top100s[idx].hash, &torrent->hash, sizeof(ot_hash)); |
| 321 | top100c[idx].val = torrent->peer_list->peer_count; | 355 | top100s[idx].val = seed_count; |
| 322 | } | 356 | } |
| 323 | idx = amount - 1; | 357 | idx = amount - 1; |
| 324 | while( (idx >= 0) && ( torrent->peer_list->seed_count > top100s[idx].val ) ) | 358 | while ((idx >= 0) && (leech_count > top100l[idx].val)) |
| 325 | --idx; | 359 | --idx; |
| 326 | if ( idx++ != amount - 1 ) { | 360 | if (idx++ != amount - 1) { |
| 327 | memmove( top100s + idx + 1, top100s + idx, ( amount - 1 - idx ) * sizeof( ot_record ) ); | 361 | memmove(top100l + idx + 1, top100l + idx, (amount - 1 - idx) * sizeof(ot_record)); |
| 328 | memcpy( &top100s[idx].hash, &torrent->hash, sizeof(ot_hash)); | 362 | memcpy(&top100l[idx].hash, &torrent->hash, sizeof(ot_hash)); |
| 329 | top100s[idx].val = torrent->peer_list->seed_count; | 363 | top100l[idx].val = leech_count; |
| 330 | } | 364 | } |
| 331 | } | 365 | } |
| 332 | mutex_bucket_unlock( bucket, 0 ); | 366 | mutex_bucket_unlock(bucket, 0); |
| 333 | if( !g_opentracker_running ) | 367 | if (!g_opentracker_running) |
| 334 | return 0; | 368 | return 0; |
| 335 | } | 369 | } |
| 336 | 370 | ||
| 337 | r += sprintf( r, "Top %d torrents by peers:\n", amount ); | 371 | r += sprintf(r, "Top %d torrents by peers:\n", amount); |
| 338 | for( idx=0; idx<amount; ++idx ) | 372 | for (idx = 0; idx < amount; ++idx) |
| 339 | if( top100c[idx].val ) | 373 | if (top100c[idx].val) |
| 340 | r += sprintf( r, "\t%zd\t%s\n", top100c[idx].val, to_hex( hex_out, top100c[idx].hash) ); | 374 | r += sprintf(r, "\t%zd\t%s\n", top100c[idx].val, to_hex(hex_out, top100c[idx].hash)); |
| 341 | r += sprintf( r, "Top %d torrents by seeds:\n", amount ); | 375 | r += sprintf(r, "Top %d torrents by seeds:\n", amount); |
| 342 | for( idx=0; idx<amount; ++idx ) | 376 | for (idx = 0; idx < amount; ++idx) |
| 343 | if( top100s[idx].val ) | 377 | if (top100s[idx].val) |
| 344 | r += sprintf( r, "\t%zd\t%s\n", top100s[idx].val, to_hex( hex_out, top100s[idx].hash) ); | 378 | r += sprintf(r, "\t%zd\t%s\n", top100s[idx].val, to_hex(hex_out, top100s[idx].hash)); |
| 379 | r += sprintf(r, "Top %d torrents by leechers:\n", amount); | ||
| 380 | for (idx = 0; idx < amount; ++idx) | ||
| 381 | if (top100l[idx].val) | ||
| 382 | r += sprintf(r, "\t%zd\t%s\n", top100l[idx].val, to_hex(hex_out, top100l[idx].hash)); | ||
| 345 | 383 | ||
| 346 | return r - reply; | 384 | return r - reply; |
| 347 | } | 385 | } |
| 348 | 386 | ||
| 349 | static unsigned long events_per_time( unsigned long long events, time_t t ) { | 387 | static unsigned long events_per_time(unsigned long long events, time_t t) { return events / ((unsigned int)t ? (unsigned int)t : 1); } |
| 350 | return events / ( (unsigned int)t ? (unsigned int)t : 1 ); | ||
| 351 | } | ||
| 352 | 388 | ||
| 353 | static size_t stats_connections_mrtg( char * reply ) { | 389 | static size_t stats_connections_mrtg(char *reply) { |
| 354 | ot_time t = time( NULL ) - ot_start_time; | 390 | ot_time t = time(NULL) - ot_start_time; |
| 355 | return sprintf( reply, | 391 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", |
| 356 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", | 392 | ot_overall_tcp_connections + ot_overall_udp_connections, |
| 357 | ot_overall_tcp_connections+ot_overall_udp_connections, | 393 | ot_overall_tcp_successfulannounces + ot_overall_udp_successfulannounces + ot_overall_udp_connects, (int)t, (int)(t / 3600), |
| 358 | ot_overall_tcp_successfulannounces+ot_overall_udp_successfulannounces+ot_overall_udp_connects, | 394 | events_per_time(ot_overall_tcp_connections + ot_overall_udp_connections, t), |
| 359 | (int)t, | 395 | events_per_time(ot_overall_tcp_successfulannounces + ot_overall_udp_successfulannounces + ot_overall_udp_connects, t)); |
| 360 | (int)(t / 3600), | ||
| 361 | events_per_time( ot_overall_tcp_connections+ot_overall_udp_connections, t ), | ||
| 362 | events_per_time( ot_overall_tcp_successfulannounces+ot_overall_udp_successfulannounces+ot_overall_udp_connects, t ) | ||
| 363 | ); | ||
| 364 | } | 396 | } |
| 365 | 397 | ||
| 366 | static size_t stats_udpconnections_mrtg( char * reply ) { | 398 | static size_t stats_udpconnections_mrtg(char *reply) { |
| 367 | ot_time t = time( NULL ) - ot_start_time; | 399 | ot_time t = time(NULL) - ot_start_time; |
| 368 | return sprintf( reply, | 400 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker udp4 stats, %lu conns/s :: %lu success/s.", ot_overall_udp_connections, |
| 369 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker udp4 stats, %lu conns/s :: %lu success/s.", | 401 | ot_overall_udp_successfulannounces + ot_overall_udp_connects, (int)t, (int)(t / 3600), events_per_time(ot_overall_udp_connections, t), |
| 370 | ot_overall_udp_connections, | 402 | events_per_time(ot_overall_udp_successfulannounces + ot_overall_udp_connects, t)); |
| 371 | ot_overall_udp_successfulannounces+ot_overall_udp_connects, | ||
| 372 | (int)t, | ||
| 373 | (int)(t / 3600), | ||
| 374 | events_per_time( ot_overall_udp_connections, t ), | ||
| 375 | events_per_time( ot_overall_udp_successfulannounces+ot_overall_udp_connects, t ) | ||
| 376 | ); | ||
| 377 | } | 403 | } |
| 378 | 404 | ||
| 379 | static size_t stats_tcpconnections_mrtg( char * reply ) { | 405 | static size_t stats_tcpconnections_mrtg(char *reply) { |
| 380 | time_t t = time( NULL ) - ot_start_time; | 406 | time_t t = time(NULL) - ot_start_time; |
| 381 | return sprintf( reply, | 407 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker tcp4 stats, %lu conns/s :: %lu success/s.", ot_overall_tcp_connections, |
| 382 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker tcp4 stats, %lu conns/s :: %lu success/s.", | 408 | ot_overall_tcp_successfulannounces, (int)t, (int)(t / 3600), events_per_time(ot_overall_tcp_connections, t), |
| 383 | ot_overall_tcp_connections, | 409 | events_per_time(ot_overall_tcp_successfulannounces, t)); |
| 384 | ot_overall_tcp_successfulannounces, | ||
| 385 | (int)t, | ||
| 386 | (int)(t / 3600), | ||
| 387 | events_per_time( ot_overall_tcp_connections, t ), | ||
| 388 | events_per_time( ot_overall_tcp_successfulannounces, t ) | ||
| 389 | ); | ||
| 390 | } | 410 | } |
| 391 | 411 | ||
| 392 | static size_t stats_scrape_mrtg( char * reply ) { | 412 | static size_t stats_scrape_mrtg(char *reply) { |
| 393 | time_t t = time( NULL ) - ot_start_time; | 413 | time_t t = time(NULL) - ot_start_time; |
| 394 | return sprintf( reply, | 414 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker scrape stats, %lu scrape/s (tcp and udp)", ot_overall_tcp_successfulscrapes, |
| 395 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker scrape stats, %lu scrape/s (tcp and udp)", | 415 | ot_overall_udp_successfulscrapes, (int)t, (int)(t / 3600), |
| 396 | ot_overall_tcp_successfulscrapes, | 416 | events_per_time((ot_overall_tcp_successfulscrapes + ot_overall_udp_successfulscrapes), t)); |
| 397 | ot_overall_udp_successfulscrapes, | ||
| 398 | (int)t, | ||
| 399 | (int)(t / 3600), | ||
| 400 | events_per_time( (ot_overall_tcp_successfulscrapes+ot_overall_udp_successfulscrapes), t ) | ||
| 401 | ); | ||
| 402 | } | 417 | } |
| 403 | 418 | ||
| 404 | static size_t stats_fullscrapes_mrtg( char * reply ) { | 419 | static size_t stats_fullscrapes_mrtg(char *reply) { |
| 405 | ot_time t = time( NULL ) - ot_start_time; | 420 | ot_time t = time(NULL) - ot_start_time; |
| 406 | return sprintf( reply, | 421 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker full scrape stats, %lu conns/s :: %lu bytes/s.", ot_full_scrape_count * 1000, |
| 407 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker full scrape stats, %lu conns/s :: %lu bytes/s.", | 422 | ot_full_scrape_size, (int)t, (int)(t / 3600), events_per_time(ot_full_scrape_count, t), events_per_time(ot_full_scrape_size, t)); |
| 408 | ot_full_scrape_count * 1000, | ||
| 409 | ot_full_scrape_size, | ||
| 410 | (int)t, | ||
| 411 | (int)(t / 3600), | ||
| 412 | events_per_time( ot_full_scrape_count, t ), | ||
| 413 | events_per_time( ot_full_scrape_size, t ) | ||
| 414 | ); | ||
| 415 | } | 423 | } |
| 416 | 424 | ||
| 417 | static size_t stats_peers_mrtg( char * reply ) { | 425 | static size_t stats_peers_mrtg(char *reply) { |
| 418 | torrent_stats stats = {0,0,0}; | 426 | torrent_stats stats = {0, 0, 0}; |
| 419 | 427 | ||
| 420 | iterate_all_torrents( torrent_statter, (uintptr_t)&stats ); | 428 | iterate_all_torrents(torrent_statter, (uintptr_t)&stats); |
| 421 | 429 | ||
| 422 | return sprintf( reply, "%llu\n%llu\nopentracker serving %llu torrents\nopentracker", | 430 | return sprintf(reply, "%llu\n%llu\nopentracker serving %llu torrents\nopentracker", stats.peer_count, stats.seed_count, stats.torrent_count); |
| 423 | stats.peer_count, | ||
| 424 | stats.seed_count, | ||
| 425 | stats.torrent_count | ||
| 426 | ); | ||
| 427 | } | 431 | } |
| 428 | 432 | ||
| 429 | static size_t stats_torrents_mrtg( char * reply ) | 433 | static size_t stats_torrents_mrtg(char *reply) { |
| 430 | { | ||
| 431 | size_t torrent_count = mutex_get_torrent_count(); | 434 | size_t torrent_count = mutex_get_torrent_count(); |
| 432 | 435 | ||
| 433 | return sprintf( reply, "%zd\n%zd\nopentracker serving %zd torrents\nopentracker", | 436 | return sprintf(reply, "%zd\n%zd\nopentracker serving %zd torrents\nopentracker", torrent_count, (size_t)0, torrent_count); |
| 434 | torrent_count, | ||
| 435 | (size_t)0, | ||
| 436 | torrent_count | ||
| 437 | ); | ||
| 438 | } | 437 | } |
| 439 | 438 | ||
| 440 | static size_t stats_httperrors_txt ( char * reply ) { | 439 | static size_t stats_httperrors_txt(char *reply) { |
| 441 | return sprintf( reply, "302 RED %llu\n400 ... %llu\n400 PAR %llu\n400 COM %llu\n403 IP %llu\n404 INV %llu\n500 SRV %llu\n", | 440 | return sprintf(reply, "302 RED %llu\n400 ... %llu\n400 PAR %llu\n400 COM %llu\n403 IP %llu\n404 INV %llu\n500 SRV %llu\n", ot_failed_request_counts[0], |
| 442 | ot_failed_request_counts[0], ot_failed_request_counts[1], ot_failed_request_counts[2], | 441 | ot_failed_request_counts[1], ot_failed_request_counts[2], ot_failed_request_counts[3], ot_failed_request_counts[4], |
| 443 | ot_failed_request_counts[3], ot_failed_request_counts[4], ot_failed_request_counts[5], | 442 | ot_failed_request_counts[5], ot_failed_request_counts[6]); |
| 444 | ot_failed_request_counts[6] ); | ||
| 445 | } | 443 | } |
| 446 | 444 | ||
| 447 | static size_t stats_return_renew_bucket( char * reply ) { | 445 | static size_t stats_return_renew_bucket(char *reply) { |
| 448 | char *r = reply; | 446 | char *r = reply; |
| 449 | int i; | 447 | int i; |
| 450 | 448 | ||
| 451 | for( i=0; i<OT_PEER_TIMEOUT; ++i ) | 449 | for (i = 0; i < OT_PEER_TIMEOUT; ++i) |
| 452 | r+=sprintf(r,"%02i %llu\n", i, ot_renewed[i] ); | 450 | r += sprintf(r, "%02i %llu\n", i, ot_renewed[i]); |
| 453 | return r - reply; | 451 | return r - reply; |
| 454 | } | 452 | } |
| 455 | 453 | ||
| 456 | static size_t stats_return_sync_mrtg( char * reply ) { | 454 | static size_t stats_return_sync_mrtg(char *reply) { |
| 457 | ot_time t = time( NULL ) - ot_start_time; | 455 | ot_time t = time(NULL) - ot_start_time; |
| 458 | return sprintf( reply, | 456 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", ot_overall_sync_count, 0LL, (int)t, |
| 459 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker connections, %lu conns/s :: %lu success/s.", | 457 | (int)(t / 3600), events_per_time(ot_overall_tcp_connections + ot_overall_udp_connections, t), |
| 460 | ot_overall_sync_count, | 458 | events_per_time(ot_overall_tcp_successfulannounces + ot_overall_udp_successfulannounces + ot_overall_udp_connects, t)); |
| 461 | 0LL, | ||
| 462 | (int)t, | ||
| 463 | (int)(t / 3600), | ||
| 464 | events_per_time( ot_overall_tcp_connections+ot_overall_udp_connections, t ), | ||
| 465 | events_per_time( ot_overall_tcp_successfulannounces+ot_overall_udp_successfulannounces+ot_overall_udp_connects, t ) | ||
| 466 | ); | ||
| 467 | } | 459 | } |
| 468 | 460 | ||
| 469 | static size_t stats_return_completed_mrtg( char * reply ) { | 461 | static size_t stats_return_completed_mrtg(char *reply) { |
| 470 | ot_time t = time( NULL ) - ot_start_time; | 462 | ot_time t = time(NULL) - ot_start_time; |
| 471 | 463 | ||
| 472 | return sprintf( reply, | 464 | return sprintf(reply, "%llu\n%llu\n%i seconds (%i hours)\nopentracker, %lu completed/h.", ot_overall_completed, 0LL, (int)t, (int)(t / 3600), |
| 473 | "%llu\n%llu\n%i seconds (%i hours)\nopentracker, %lu completed/h.", | 465 | events_per_time(ot_overall_completed, t / 3600)); |
| 474 | ot_overall_completed, | ||
| 475 | 0LL, | ||
| 476 | (int)t, | ||
| 477 | (int)(t / 3600), | ||
| 478 | events_per_time( ot_overall_completed, t / 3600 ) | ||
| 479 | ); | ||
| 480 | } | 466 | } |
| 481 | 467 | ||
| 482 | #ifdef WANT_LOG_NUMWANT | 468 | #ifdef WANT_LOG_NUMWANT |
| 483 | extern unsigned long long numwants[201]; | 469 | extern unsigned long long numwants[201]; |
| 484 | static size_t stats_return_numwants( char * reply ) { | 470 | static size_t stats_return_numwants(char *reply) { |
| 485 | char * r = reply; | 471 | char *r = reply; |
| 486 | int i; | 472 | int i; |
| 487 | for( i=0; i<=200; ++i ) | 473 | for (i = 0; i <= 200; ++i) |
| 488 | r += sprintf( r, "%03d => %lld\n", i, numwants[i] ); | 474 | r += sprintf(r, "%03d => %lld\n", i, numwants[i]); |
| 489 | return r-reply; | 475 | return r - reply; |
| 490 | } | 476 | } |
| 491 | #endif | 477 | #endif |
| 492 | 478 | ||
| 493 | #ifdef WANT_FULLLOG_NETWORKS | 479 | #ifdef WANT_FULLLOG_NETWORKS |
| 494 | static void stats_return_fulllog( int *iovec_entries, struct iovec **iovector, char *r ) { | 480 | static void stats_return_fulllog(int *iovec_entries, struct iovec **iovector, char *r) { |
| 495 | ot_log *loglist = g_logchain_first, *llnext; | 481 | ot_log *loglist = g_logchain_first, *llnext; |
| 496 | char * re = r + OT_STATS_TMPSIZE; | 482 | char *re = r + OT_STATS_TMPSIZE; |
| 497 | 483 | ||
| 498 | g_logchain_first = g_logchain_last = 0; | 484 | g_logchain_first = g_logchain_last = 0; |
| 499 | 485 | ||
| 500 | while( loglist ) { | 486 | while (loglist) { |
| 501 | if( r + ( loglist->size + 64 ) >= re ) { | 487 | if (r + (loglist->size + 64) >= re) { |
| 502 | r = iovec_fix_increase_or_free( iovec_entries, iovector, r, 32 * OT_STATS_TMPSIZE ); | 488 | r = iovec_fix_increase_or_free(iovec_entries, iovector, r, 32 * OT_STATS_TMPSIZE); |
| 503 | if( !r ) return; | 489 | if (!r) |
| 490 | return; | ||
| 504 | re = r + 32 * OT_STATS_TMPSIZE; | 491 | re = r + 32 * OT_STATS_TMPSIZE; |
| 505 | } | 492 | } |
| 506 | r += sprintf( r, "%08ld: ", loglist->time ); | 493 | r += sprintf(r, "%08ld: ", loglist->time); |
| 507 | r += fmt_ip6c( r, loglist->ip ); | 494 | r += fmt_ip6c(r, loglist->ip); |
| 508 | *r++ = '\n'; | 495 | *r++ = '\n'; |
| 509 | memcpy( r, loglist->data, loglist->size ); | 496 | memcpy(r, loglist->data, loglist->size); |
| 510 | r += loglist->size; | 497 | r += loglist->size; |
| 511 | *r++ = '\n'; | 498 | *r++ = '\n'; |
| 512 | *r++ = '*'; | 499 | *r++ = '*'; |
| 513 | *r++ = '\n'; | 500 | *r++ = '\n'; |
| 514 | *r++ = '\n'; | 501 | *r++ = '\n'; |
| 515 | 502 | ||
| 516 | llnext = loglist->next; | 503 | llnext = loglist->next; |
| 517 | free( loglist->data ); | 504 | free(loglist->data); |
| 518 | free( loglist ); | 505 | free(loglist); |
| 519 | loglist = llnext; | 506 | loglist = llnext; |
| 520 | } | 507 | } |
| 521 | iovec_fixlast( iovec_entries, iovector, r ); | 508 | iovec_fixlast(iovec_entries, iovector, r); |
| 522 | } | 509 | } |
| 523 | #endif | 510 | #endif |
| 524 | 511 | ||
| 525 | static size_t stats_return_everything( char * reply ) { | 512 | static size_t stats_return_everything(char *reply) { |
| 526 | torrent_stats stats = {0,0,0}; | 513 | torrent_stats stats = {0, 0, 0}; |
| 527 | int i; | 514 | int i; |
| 528 | char * r = reply; | 515 | char *r = reply; |
| 529 | 516 | ||
| 530 | iterate_all_torrents( torrent_statter, (uintptr_t)&stats ); | 517 | iterate_all_torrents(torrent_statter, (uintptr_t)&stats); |
| 531 | 518 | ||
| 532 | r += sprintf( r, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n" ); | 519 | r += sprintf(r, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); |
| 533 | r += sprintf( r, "<stats>\n" ); | 520 | r += sprintf(r, "<stats>\n"); |
| 534 | r += sprintf( r, " <tracker_id>%" PRIu32 "</tracker_id>\n", g_tracker_id ); | 521 | r += sprintf(r, " <tracker_id>%" PRIu32 "</tracker_id>\n", g_tracker_id); |
| 535 | r += sprintf( r, " <version>\n" ); r += stats_return_tracker_version( r ); r += sprintf( r, " </version>\n" ); | 522 | r += sprintf(r, " <version>\n"); |
| 536 | r += sprintf( r, " <uptime>%llu</uptime>\n", (unsigned long long)(time( NULL ) - ot_start_time) ); | 523 | r += stats_return_tracker_version(r); |
| 537 | r += sprintf( r, " <torrents>\n" ); | 524 | r += sprintf(r, " </version>\n"); |
| 538 | r += sprintf( r, " <count_mutex>%zd</count_mutex>\n", mutex_get_torrent_count() ); | 525 | r += sprintf(r, " <uptime>%llu</uptime>\n", (unsigned long long)(time(NULL) - ot_start_time)); |
| 539 | r += sprintf( r, " <count_iterator>%llu</count_iterator>\n", stats.torrent_count ); | 526 | r += sprintf(r, " <torrents>\n"); |
| 540 | r += sprintf( r, " </torrents>\n" ); | 527 | r += sprintf(r, " <count_mutex>%zd</count_mutex>\n", mutex_get_torrent_count()); |
| 541 | r += sprintf( r, " <peers>\n <count>%llu</count>\n </peers>\n", stats.peer_count ); | 528 | r += sprintf(r, " <count_iterator>%llu</count_iterator>\n", stats.torrent_count); |
| 542 | r += sprintf( r, " <seeds>\n <count>%llu</count>\n </seeds>\n", stats.seed_count ); | 529 | r += sprintf(r, " </torrents>\n"); |
| 543 | r += sprintf( r, " <completed>\n <count>%llu</count>\n </completed>\n", ot_overall_completed ); | 530 | r += sprintf(r, " <peers>\n <count>%llu</count>\n </peers>\n", stats.peer_count); |
| 544 | r += sprintf( r, " <connections>\n" ); | 531 | r += sprintf(r, " <seeds>\n <count>%llu</count>\n </seeds>\n", stats.seed_count); |
| 545 | r += sprintf( r, " <tcp>\n <accept>%llu</accept>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n </tcp>\n", ot_overall_tcp_connections, ot_overall_tcp_successfulannounces, ot_overall_tcp_successfulscrapes ); | 532 | r += sprintf(r, " <completed>\n <count>%llu</count>\n </completed>\n", ot_overall_completed); |
| 546 | r += sprintf( r, " <udp>\n <overall>%llu</overall>\n <connect>%llu</connect>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n <missmatch>%llu</missmatch>\n </udp>\n", ot_overall_udp_connections, ot_overall_udp_connects, ot_overall_udp_successfulannounces, ot_overall_udp_successfulscrapes, ot_overall_udp_connectionidmissmatches ); | 533 | r += sprintf(r, " <connections>\n"); |
| 547 | r += sprintf( r, " <livesync>\n <count>%llu</count>\n </livesync>\n", ot_overall_sync_count ); | 534 | r += sprintf(r, " <tcp>\n <accept>%llu</accept>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n </tcp>\n", |
| 548 | r += sprintf( r, " </connections>\n" ); | 535 | ot_overall_tcp_connections, ot_overall_tcp_successfulannounces, ot_overall_tcp_successfulscrapes); |
| 549 | r += sprintf( r, " <debug>\n" ); | 536 | r += sprintf(r, " <udp>\n <overall>%llu</overall>\n <connect>%llu</connect>\n <announce>%llu</announce>\n <scrape>%llu</scrape>\n <missmatch>%llu</missmatch>\n </udp>\n", |
| 550 | r += sprintf( r, " <renew>\n" ); | 537 | ot_overall_udp_connections, ot_overall_udp_connects, ot_overall_udp_successfulannounces, ot_overall_udp_successfulscrapes, |
| 551 | for( i=0; i<OT_PEER_TIMEOUT; ++i ) | 538 | ot_overall_udp_connectionidmissmatches); |
| 552 | r += sprintf( r, " <count interval=\"%02i\">%llu</count>\n", i, ot_renewed[i] ); | 539 | r += sprintf(r, " <livesync>\n <count>%llu</count>\n </livesync>\n", ot_overall_sync_count); |
| 553 | r += sprintf( r, " </renew>\n" ); | 540 | r += sprintf(r, " </connections>\n"); |
| 554 | r += sprintf( r, " <http_error>\n" ); | 541 | r += sprintf(r, " <debug>\n"); |
| 555 | for( i=0; i<CODE_HTTPERROR_COUNT; ++i ) | 542 | r += sprintf(r, " <renew>\n"); |
| 556 | r += sprintf( r, " <count code=\"%s\">%llu</count>\n", ot_failed_request_names[i], ot_failed_request_counts[i] ); | 543 | for (i = 0; i < OT_PEER_TIMEOUT; ++i) |
| 557 | r += sprintf( r, " </http_error>\n" ); | 544 | r += sprintf(r, " <count interval=\"%02i\">%llu</count>\n", i, ot_renewed[i]); |
| 558 | r += sprintf( r, " <mutex_stall>\n <count>%llu</count>\n </mutex_stall>\n", ot_overall_stall_count ); | 545 | r += sprintf(r, " </renew>\n"); |
| 559 | r += sprintf( r, " </debug>\n" ); | 546 | r += sprintf(r, " <http_error>\n"); |
| 560 | r += sprintf( r, "</stats>" ); | 547 | for (i = 0; i < CODE_HTTPERROR_COUNT; ++i) |
| 548 | r += sprintf(r, " <count code=\"%s\">%llu</count>\n", ot_failed_request_names[i], ot_failed_request_counts[i]); | ||
| 549 | r += sprintf(r, " </http_error>\n"); | ||
| 550 | r += sprintf(r, " <mutex_stall>\n <count>%llu</count>\n </mutex_stall>\n", ot_overall_stall_count); | ||
| 551 | r += sprintf(r, " </debug>\n"); | ||
| 552 | r += sprintf(r, "</stats>"); | ||
| 561 | return r - reply; | 553 | return r - reply; |
| 562 | } | 554 | } |
| 563 | 555 | ||
| 564 | extern const char | 556 | size_t stats_return_tracker_version(char *reply) { |
| 565 | *g_version_opentracker_c, *g_version_accesslist_c, *g_version_clean_c, *g_version_fullscrape_c, *g_version_http_c, | 557 | #define QUOTE(name) #name |
| 566 | *g_version_iovec_c, *g_version_mutex_c, *g_version_stats_c, *g_version_udp_c, *g_version_vector_c, | 558 | #define SQUOTE(name) QUOTE(name) |
| 567 | *g_version_scan_urlencoded_query_c, *g_version_trackerlogic_c, *g_version_livesync_c, *g_version_rijndael_c; | 559 | return sprintf(reply, "https://erdgeist.org/gitweb/opentracker/commit/?id=" SQUOTE(GIT_VERSION) "\n"); |
| 568 | 560 | } | |
| 569 | size_t stats_return_tracker_version( char *reply ) { | 561 | |
| 570 | return sprintf( reply, "%s%s%s%s%s%s%s%s%s%s%s%s%s%s", | 562 | size_t return_stats_for_tracker(char *reply, int mode, int format) { |
| 571 | g_version_opentracker_c, g_version_accesslist_c, g_version_clean_c, g_version_fullscrape_c, g_version_http_c, | 563 | (void)format; |
| 572 | g_version_iovec_c, g_version_mutex_c, g_version_stats_c, g_version_udp_c, g_version_vector_c, | 564 | switch (mode & TASK_TASK_MASK) { |
| 573 | g_version_scan_urlencoded_query_c, g_version_trackerlogic_c, g_version_livesync_c, g_version_rijndael_c ); | 565 | case TASK_STATS_CONNS: |
| 574 | } | 566 | return stats_connections_mrtg(reply); |
| 575 | 567 | case TASK_STATS_SCRAPE: | |
| 576 | size_t return_stats_for_tracker( char *reply, int mode, int format ) { | 568 | return stats_scrape_mrtg(reply); |
| 577 | (void) format; | 569 | case TASK_STATS_UDP: |
| 578 | switch( mode & TASK_TASK_MASK ) { | 570 | return stats_udpconnections_mrtg(reply); |
| 579 | case TASK_STATS_CONNS: | 571 | case TASK_STATS_TCP: |
| 580 | return stats_connections_mrtg( reply ); | 572 | return stats_tcpconnections_mrtg(reply); |
| 581 | case TASK_STATS_SCRAPE: | 573 | case TASK_STATS_FULLSCRAPE: |
| 582 | return stats_scrape_mrtg( reply ); | 574 | return stats_fullscrapes_mrtg(reply); |
| 583 | case TASK_STATS_UDP: | 575 | case TASK_STATS_COMPLETED: |
| 584 | return stats_udpconnections_mrtg( reply ); | 576 | return stats_return_completed_mrtg(reply); |
| 585 | case TASK_STATS_TCP: | 577 | case TASK_STATS_HTTPERRORS: |
| 586 | return stats_tcpconnections_mrtg( reply ); | 578 | return stats_httperrors_txt(reply); |
| 587 | case TASK_STATS_FULLSCRAPE: | 579 | case TASK_STATS_VERSION: |
| 588 | return stats_fullscrapes_mrtg( reply ); | 580 | return stats_return_tracker_version(reply); |
| 589 | case TASK_STATS_COMPLETED: | 581 | case TASK_STATS_RENEW: |
| 590 | return stats_return_completed_mrtg( reply ); | 582 | return stats_return_renew_bucket(reply); |
| 591 | case TASK_STATS_HTTPERRORS: | 583 | case TASK_STATS_SYNCS: |
| 592 | return stats_httperrors_txt( reply ); | 584 | return stats_return_sync_mrtg(reply); |
| 593 | case TASK_STATS_VERSION: | ||
| 594 | return stats_return_tracker_version( reply ); | ||
| 595 | case TASK_STATS_RENEW: | ||
| 596 | return stats_return_renew_bucket( reply ); | ||
| 597 | case TASK_STATS_SYNCS: | ||
| 598 | return stats_return_sync_mrtg( reply ); | ||
| 599 | #ifdef WANT_LOG_NUMWANT | 585 | #ifdef WANT_LOG_NUMWANT |
| 600 | case TASK_STATS_NUMWANTS: | 586 | case TASK_STATS_NUMWANTS: |
| 601 | return stats_return_numwants( reply ); | 587 | return stats_return_numwants(reply); |
| 602 | #endif | 588 | #endif |
| 603 | default: | 589 | default: |
| 604 | return 0; | 590 | return 0; |
| 605 | } | 591 | } |
| 606 | } | 592 | } |
| 607 | 593 | ||
| 608 | static void stats_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ) { | 594 | static void stats_make(int *iovec_entries, struct iovec **iovector, ot_tasktype mode) { |
| 609 | char *r; | 595 | char *r; |
| 610 | 596 | ||
| 611 | *iovec_entries = 0; | 597 | *iovec_entries = 0; |
| 612 | *iovector = NULL; | 598 | *iovector = NULL; |
| 613 | if( !( r = iovec_increase( iovec_entries, iovector, OT_STATS_TMPSIZE ) ) ) | 599 | if (!(r = iovec_increase(iovec_entries, iovector, OT_STATS_TMPSIZE))) |
| 614 | return; | 600 | return; |
| 615 | 601 | ||
| 616 | switch( mode & TASK_TASK_MASK ) { | 602 | switch (mode & TASK_TASK_MASK) { |
| 617 | case TASK_STATS_TORRENTS: r += stats_torrents_mrtg( r ); break; | 603 | case TASK_STATS_TORRENTS: |
| 618 | case TASK_STATS_PEERS: r += stats_peers_mrtg( r ); break; | 604 | r += stats_torrents_mrtg(r); |
| 619 | case TASK_STATS_SLASH24S: r += stats_slash24s_txt( r, 128 ); break; | 605 | break; |
| 620 | case TASK_STATS_TOP10: r += stats_top_txt( r, 10 ); break; | 606 | case TASK_STATS_PEERS: |
| 621 | case TASK_STATS_TOP100: | 607 | r += stats_peers_mrtg(r); |
| 622 | r = iovec_fix_increase_or_free( iovec_entries, iovector, r, 4 * OT_STATS_TMPSIZE ); | 608 | break; |
| 623 | if( !r ) return; | 609 | case TASK_STATS_SLASH24S: |
| 624 | r += stats_top_txt( r, 100 ); break; | 610 | r += stats_slash24s_txt(r, 128); |
| 625 | case TASK_STATS_EVERYTHING: r += stats_return_everything( r ); break; | 611 | break; |
| 612 | case TASK_STATS_TOP10: | ||
| 613 | r += stats_top_txt(r, 10); | ||
| 614 | break; | ||
| 615 | case TASK_STATS_TOP100: | ||
| 616 | r = iovec_fix_increase_or_free(iovec_entries, iovector, r, 4 * OT_STATS_TMPSIZE); | ||
| 617 | if (!r) | ||
| 618 | return; | ||
| 619 | r += stats_top_txt(r, 100); | ||
| 620 | break; | ||
| 621 | case TASK_STATS_EVERYTHING: | ||
| 622 | r = iovec_fix_increase_or_free(iovec_entries, iovector, r, OT_STATS_TMPSIZE + 64 * OT_PEER_TIMEOUT); | ||
| 623 | if (!r) | ||
| 624 | return; | ||
| 625 | r += stats_return_everything(r); | ||
| 626 | break; | ||
| 626 | #ifdef WANT_SPOT_WOODPECKER | 627 | #ifdef WANT_SPOT_WOODPECKER |
| 627 | case TASK_STATS_WOODPECKERS: r += stats_return_woodpeckers( r, 128 ); break; | 628 | case TASK_STATS_WOODPECKERS: |
| 629 | r += stats_return_woodpeckers(r, 128); | ||
| 630 | break; | ||
| 628 | #endif | 631 | #endif |
| 629 | #ifdef WANT_FULLLOG_NETWORKS | 632 | #ifdef WANT_FULLLOG_NETWORKS |
| 630 | case TASK_STATS_FULLLOG: stats_return_fulllog( iovec_entries, iovector, r ); | 633 | case TASK_STATS_FULLLOG: |
| 631 | return; | 634 | stats_return_fulllog(iovec_entries, iovector, r); |
| 635 | return; | ||
| 632 | #endif | 636 | #endif |
| 633 | default: | 637 | default: |
| 634 | iovec_free(iovec_entries, iovector); | 638 | iovec_free(iovec_entries, iovector); |
| 635 | return; | 639 | return; |
| 636 | } | 640 | } |
| 637 | iovec_fixlast( iovec_entries, iovector, r ); | 641 | iovec_fixlast(iovec_entries, iovector, r); |
| 638 | } | 642 | } |
| 639 | 643 | ||
| 640 | void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data ) { | 644 | void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data) { |
| 641 | switch( event ) { | 645 | switch (event) { |
| 642 | case EVENT_ACCEPT: | 646 | case EVENT_ACCEPT: |
| 643 | if( proto == FLAG_TCP ) ot_overall_tcp_connections++; else ot_overall_udp_connections++; | 647 | if (proto == FLAG_TCP) |
| 648 | ot_overall_tcp_connections++; | ||
| 649 | else | ||
| 650 | ot_overall_udp_connections++; | ||
| 644 | #ifdef WANT_LOG_NETWORKS | 651 | #ifdef WANT_LOG_NETWORKS |
| 645 | stat_increase_network_count( &stats_network_counters_root, 0, event_data ); | 652 | stat_increase_network_count(&stats_network_counters_root, 0, event_data); |
| 646 | #endif | 653 | #endif |
| 647 | break; | 654 | break; |
| 648 | case EVENT_ANNOUNCE: | 655 | case EVENT_ANNOUNCE: |
| 649 | if( proto == FLAG_TCP ) ot_overall_tcp_successfulannounces++; else ot_overall_udp_successfulannounces++; | 656 | if (proto == FLAG_TCP) |
| 650 | break; | 657 | ot_overall_tcp_successfulannounces++; |
| 651 | case EVENT_CONNECT: | 658 | else |
| 652 | if( proto == FLAG_TCP ) ot_overall_tcp_connects++; else ot_overall_udp_connects++; | 659 | ot_overall_udp_successfulannounces++; |
| 653 | break; | 660 | break; |
| 654 | case EVENT_COMPLETED: | 661 | case EVENT_CONNECT: |
| 662 | if (proto == FLAG_TCP) | ||
| 663 | ot_overall_tcp_connects++; | ||
| 664 | else | ||
| 665 | ot_overall_udp_connects++; | ||
| 666 | break; | ||
| 667 | case EVENT_COMPLETED: | ||
| 655 | #ifdef WANT_SYSLOGS | 668 | #ifdef WANT_SYSLOGS |
| 656 | if( event_data) { | 669 | if (event_data) { |
| 657 | struct ot_workstruct *ws = (struct ot_workstruct *)event_data; | 670 | struct ot_workstruct *ws = (struct ot_workstruct *)event_data; |
| 658 | char timestring[64]; | 671 | char timestring[64]; |
| 659 | char hash_hex[42], peerid_hex[42], ip_readable[64]; | 672 | char hash_hex[42], peerid_hex[42], ip_readable[64]; |
| 660 | struct tm time_now; | 673 | struct tm time_now; |
| 661 | time_t ttt; | 674 | time_t ttt; |
| 662 | 675 | ||
| 663 | time( &ttt ); | 676 | time(&ttt); |
| 664 | localtime_r( &ttt, &time_now ); | 677 | localtime_r(&ttt, &time_now); |
| 665 | strftime( timestring, sizeof( timestring ), "%FT%T%z", &time_now ); | 678 | strftime(timestring, sizeof(timestring), "%FT%T%z", &time_now); |
| 666 | 679 | ||
| 667 | to_hex( hash_hex, *ws->hash ); | 680 | to_hex(hash_hex, *ws->hash); |
| 668 | if( ws->peer_id ) | 681 | if (ws->peer_id) |
| 669 | to_hex( peerid_hex, (uint8_t*)ws->peer_id ); | 682 | to_hex(peerid_hex, (uint8_t *)ws->peer_id); |
| 670 | else { | 683 | else { |
| 671 | *peerid_hex=0; | 684 | *peerid_hex = 0; |
| 672 | } | 685 | } |
| 673 | 686 | ||
| 674 | #ifdef WANT_V6 | 687 | ip_readable[fmt_ip6c(ip_readable, (char *)&ws->peer)] = 0; |
| 675 | ip_readable[ fmt_ip6c( ip_readable, (char*)&ws->peer ) ] = 0; | 688 | #if 0 |
| 676 | #else | 689 | /* XXX */ |
| 677 | ip_readable[ fmt_ip4( ip_readable, (char*)&ws->peer ) ] = 0; | 690 | ip_readable[ fmt_ip4( ip_readable, (char*)&ws->peer ) ] = 0; |
| 678 | #endif | 691 | #endif |
| 679 | syslog( LOG_INFO, "time=%s event=completed info_hash=%s peer_id=%s ip=%s", timestring, hash_hex, peerid_hex, ip_readable ); | 692 | syslog(LOG_INFO, "time=%s event=completed info_hash=%s peer_id=%s ip=%s", timestring, hash_hex, peerid_hex, ip_readable); |
| 680 | } | ||
| 681 | #endif | ||
| 682 | ot_overall_completed++; | ||
| 683 | break; | ||
| 684 | case EVENT_SCRAPE: | ||
| 685 | if( proto == FLAG_TCP ) ot_overall_tcp_successfulscrapes++; else ot_overall_udp_successfulscrapes++; | ||
| 686 | break; | ||
| 687 | case EVENT_FULLSCRAPE: | ||
| 688 | ot_full_scrape_count++; | ||
| 689 | ot_full_scrape_size += event_data; | ||
| 690 | break; | ||
| 691 | case EVENT_FULLSCRAPE_REQUEST: | ||
| 692 | { | ||
| 693 | ot_ip6 *ip = (ot_ip6*)event_data; /* ugly hack to transfer ip to stats */ | ||
| 694 | char _debug[512]; | ||
| 695 | int off = snprintf( _debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time)/60 ); | ||
| 696 | off += fmt_ip6c( _debug+off, *ip ); | ||
| 697 | off += snprintf( _debug+off, sizeof(_debug)-off, " - FULL SCRAPE\n" ); | ||
| 698 | (void)write( 2, _debug, off ); | ||
| 699 | ot_full_scrape_request_count++; | ||
| 700 | } | 693 | } |
| 701 | break; | 694 | #endif |
| 702 | case EVENT_FULLSCRAPE_REQUEST_GZIP: | 695 | ot_overall_completed++; |
| 703 | { | 696 | break; |
| 704 | ot_ip6 *ip = (ot_ip6*)event_data; /* ugly hack to transfer ip to stats */ | 697 | case EVENT_SCRAPE: |
| 705 | char _debug[512]; | 698 | if (proto == FLAG_TCP) |
| 706 | int off = snprintf( _debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time)/60 ); | 699 | ot_overall_tcp_successfulscrapes++; |
| 707 | off += fmt_ip6c(_debug+off, *ip ); | 700 | else |
| 708 | off += snprintf( _debug+off, sizeof(_debug)-off, " - FULL SCRAPE\n" ); | 701 | ot_overall_udp_successfulscrapes++; |
| 709 | (void)write( 2, _debug, off ); | 702 | break; |
| 710 | ot_full_scrape_request_count++; | 703 | case EVENT_FULLSCRAPE: |
| 711 | } | 704 | ot_full_scrape_count++; |
| 712 | break; | 705 | ot_full_scrape_size += event_data; |
| 713 | case EVENT_FAILED: | 706 | break; |
| 714 | ot_failed_request_counts[event_data]++; | 707 | case EVENT_FULLSCRAPE_REQUEST: { |
| 715 | break; | 708 | ot_ip6 *ip = (ot_ip6 *)event_data; /* ugly hack to transfer ip to stats */ |
| 716 | case EVENT_RENEW: | 709 | char _debug[512]; |
| 717 | ot_renewed[event_data]++; | 710 | int off = snprintf(_debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time) / 60); |
| 718 | break; | 711 | off += fmt_ip6c(_debug + off, *ip); |
| 719 | case EVENT_SYNC: | 712 | off += snprintf(_debug + off, sizeof(_debug) - off, " - FULL SCRAPE\n"); |
| 720 | ot_overall_sync_count+=event_data; | 713 | (void)write(2, _debug, off); |
| 721 | break; | 714 | ot_full_scrape_request_count++; |
| 722 | case EVENT_BUCKET_LOCKED: | 715 | } break; |
| 723 | ot_overall_stall_count++; | 716 | case EVENT_FULLSCRAPE_REQUEST_GZIP: { |
| 724 | break; | 717 | ot_ip6 *ip = (ot_ip6 *)event_data; /* ugly hack to transfer ip to stats */ |
| 718 | char _debug[512]; | ||
| 719 | int off = snprintf(_debug, sizeof(_debug), "[%08d] scrp: ", (unsigned int)(g_now_seconds - ot_start_time) / 60); | ||
| 720 | off += fmt_ip6c(_debug + off, *ip); | ||
| 721 | off += snprintf(_debug + off, sizeof(_debug) - off, " - FULL SCRAPE\n"); | ||
| 722 | (void)write(2, _debug, off); | ||
| 723 | ot_full_scrape_request_count++; | ||
| 724 | } break; | ||
| 725 | case EVENT_FAILED: | ||
| 726 | ot_failed_request_counts[event_data]++; | ||
| 727 | break; | ||
| 728 | case EVENT_RENEW: | ||
| 729 | ot_renewed[event_data]++; | ||
| 730 | break; | ||
| 731 | case EVENT_SYNC: | ||
| 732 | ot_overall_sync_count += event_data; | ||
| 733 | break; | ||
| 734 | case EVENT_BUCKET_LOCKED: | ||
| 735 | ot_overall_stall_count++; | ||
| 736 | break; | ||
| 725 | #ifdef WANT_SPOT_WOODPECKER | 737 | #ifdef WANT_SPOT_WOODPECKER |
| 726 | case EVENT_WOODPECKER: | 738 | case EVENT_WOODPECKER: |
| 727 | pthread_mutex_lock( &g_woodpeckers_mutex ); | 739 | pthread_mutex_lock(&g_woodpeckers_mutex); |
| 728 | stat_increase_network_count( &stats_woodpeckers_tree, 0, event_data ); | 740 | stat_increase_network_count(&stats_woodpeckers_tree, 0, event_data); |
| 729 | pthread_mutex_unlock( &g_woodpeckers_mutex ); | 741 | pthread_mutex_unlock(&g_woodpeckers_mutex); |
| 730 | break; | 742 | break; |
| 731 | #endif | 743 | #endif |
| 732 | case EVENT_CONNID_MISSMATCH: | 744 | case EVENT_CONNID_MISSMATCH: |
| 733 | ++ot_overall_udp_connectionidmissmatches; | 745 | ++ot_overall_udp_connectionidmissmatches; |
| 734 | default: | 746 | default: |
| 735 | break; | 747 | break; |
| 736 | } | 748 | } |
| 737 | } | 749 | } |
| 738 | 750 | ||
| 739 | void stats_cleanup() { | 751 | void stats_cleanup() { |
| 740 | #ifdef WANT_SPOT_WOODPECKER | 752 | #ifdef WANT_SPOT_WOODPECKER |
| 741 | pthread_mutex_lock( &g_woodpeckers_mutex ); | 753 | pthread_mutex_lock(&g_woodpeckers_mutex); |
| 742 | stats_shift_down_network_count( &stats_woodpeckers_tree, 0, 1 ); | 754 | stats_shift_down_network_count(&stats_woodpeckers_tree, 0, 1); |
| 743 | pthread_mutex_unlock( &g_woodpeckers_mutex ); | 755 | pthread_mutex_unlock(&g_woodpeckers_mutex); |
| 744 | #endif | 756 | #endif |
| 745 | } | 757 | } |
| 746 | 758 | ||
| 747 | static void * stats_worker( void * args ) { | 759 | static void *stats_worker(void *args) { |
| 748 | int iovec_entries; | 760 | int iovec_entries; |
| 749 | struct iovec *iovector; | 761 | struct iovec *iovector; |
| 750 | 762 | ||
| 751 | (void) args; | 763 | (void)args; |
| 752 | 764 | ||
| 753 | while( 1 ) { | 765 | while (1) { |
| 754 | ot_tasktype tasktype = TASK_STATS; | 766 | ot_tasktype tasktype = TASK_STATS; |
| 755 | ot_taskid taskid = mutex_workqueue_poptask( &tasktype ); | 767 | ot_taskid taskid = mutex_workqueue_poptask(&tasktype); |
| 756 | stats_make( &iovec_entries, &iovector, tasktype ); | 768 | stats_make(&iovec_entries, &iovector, tasktype); |
| 757 | if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) ) | 769 | if (mutex_workqueue_pushresult(taskid, iovec_entries, iovector)) |
| 758 | iovec_free( &iovec_entries, &iovector ); | 770 | iovec_free(&iovec_entries, &iovector); |
| 759 | } | 771 | } |
| 760 | return NULL; | 772 | return NULL; |
| 761 | } | 773 | } |
| 762 | 774 | ||
| 763 | void stats_deliver( int64 sock, int tasktype ) { | 775 | void stats_deliver(int64 sock, int tasktype) { mutex_workqueue_pushtask(sock, tasktype); } |
| 764 | mutex_workqueue_pushtask( sock, tasktype ); | ||
| 765 | } | ||
| 766 | 776 | ||
| 767 | static pthread_t thread_id; | 777 | static pthread_t thread_id; |
| 768 | void stats_init( ) { | 778 | void stats_init() { |
| 769 | ot_start_time = g_now_seconds; | 779 | ot_start_time = g_now_seconds; |
| 770 | pthread_create( &thread_id, NULL, stats_worker, NULL ); | 780 | pthread_create(&thread_id, NULL, stats_worker, NULL); |
| 771 | } | 781 | } |
| 772 | 782 | ||
| 773 | void stats_deinit( ) { | 783 | void stats_deinit() { |
| 774 | pthread_cancel( thread_id ); | 784 | pthread_cancel(thread_id); |
| 775 | } | 785 | } |
| 776 | |||
| 777 | const char *g_version_stats_c = "$Source$: $Revision$\n"; | ||
| @@ -6,10 +6,12 @@ | |||
| 6 | #ifndef OT_STATS_H__ | 6 | #ifndef OT_STATS_H__ |
| 7 | #define OT_STATS_H__ | 7 | #define OT_STATS_H__ |
| 8 | 8 | ||
| 9 | #include "trackerlogic.h" | ||
| 10 | |||
| 9 | typedef enum { | 11 | typedef enum { |
| 10 | EVENT_ACCEPT, | 12 | EVENT_ACCEPT, |
| 11 | EVENT_READ, | 13 | EVENT_READ, |
| 12 | EVENT_CONNECT, /* UDP only */ | 14 | EVENT_CONNECT, /* UDP only */ |
| 13 | EVENT_ANNOUNCE, | 15 | EVENT_ANNOUNCE, |
| 14 | EVENT_COMPLETED, | 16 | EVENT_COMPLETED, |
| 15 | EVENT_RENEW, | 17 | EVENT_RENEW, |
| @@ -17,7 +19,8 @@ typedef enum { | |||
| 17 | EVENT_SCRAPE, | 19 | EVENT_SCRAPE, |
| 18 | EVENT_FULLSCRAPE_REQUEST, | 20 | EVENT_FULLSCRAPE_REQUEST, |
| 19 | EVENT_FULLSCRAPE_REQUEST_GZIP, | 21 | EVENT_FULLSCRAPE_REQUEST_GZIP, |
| 20 | EVENT_FULLSCRAPE, /* TCP only */ | 22 | EVENT_FULLSCRAPE_REQUEST_ZSTD, |
| 23 | EVENT_FULLSCRAPE, /* TCP only */ | ||
| 21 | EVENT_FAILED, | 24 | EVENT_FAILED, |
| 22 | EVENT_BUCKET_LOCKED, | 25 | EVENT_BUCKET_LOCKED, |
| 23 | EVENT_WOODPECKER, | 26 | EVENT_WOODPECKER, |
| @@ -38,15 +41,12 @@ enum { | |||
| 38 | CODE_HTTPERROR_COUNT | 41 | CODE_HTTPERROR_COUNT |
| 39 | }; | 42 | }; |
| 40 | 43 | ||
| 41 | void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data ); | 44 | void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data); |
| 42 | void stats_deliver( int64 sock, int tasktype ); | 45 | void stats_deliver(int64 sock, int tasktype); |
| 43 | void stats_cleanup( void ); | 46 | void stats_cleanup(void); |
| 44 | size_t return_stats_for_tracker( char *reply, int mode, int format ); | 47 | size_t return_stats_for_tracker(char *reply, int mode, int format); |
| 45 | size_t stats_return_tracker_version( char *reply ); | 48 | size_t stats_return_tracker_version(char *reply); |
| 46 | void stats_init( void ); | 49 | void stats_init(void); |
| 47 | void stats_deinit( void ); | 50 | void stats_deinit(void); |
| 48 | |||
| 49 | extern const char *g_version_rijndael_c; | ||
| 50 | extern const char *g_version_livesync_c; | ||
| 51 | 51 | ||
| 52 | #endif | 52 | #endif |
| @@ -4,64 +4,66 @@ | |||
| 4 | $id$ */ | 4 | $id$ */ |
| 5 | 5 | ||
| 6 | /* System */ | 6 | /* System */ |
| 7 | #include <sys/types.h> | 7 | #include <pthread.h> |
| 8 | #include <sys/mman.h> | ||
| 9 | #include <sys/uio.h> | ||
| 10 | #include <stdio.h> | 8 | #include <stdio.h> |
| 11 | #include <string.h> | 9 | #include <string.h> |
| 12 | #include <pthread.h> | 10 | #include <sys/mman.h> |
| 11 | #include <sys/types.h> | ||
| 12 | #include <sys/uio.h> | ||
| 13 | 13 | ||
| 14 | /* Libowfat */ | 14 | /* Libowfat */ |
| 15 | #include "scan.h" | ||
| 16 | #include "byte.h" | 15 | #include "byte.h" |
| 17 | #include "io.h" | 16 | #include "io.h" |
| 17 | #include "scan.h" | ||
| 18 | 18 | ||
| 19 | /* Opentracker */ | 19 | /* Opentracker */ |
| 20 | #include "trackerlogic.h" | 20 | #include "ot_iovec.h" |
| 21 | #include "ot_mutex.h" | 21 | #include "ot_mutex.h" |
| 22 | #include "ot_sync.h" | ||
| 23 | #include "ot_stats.h" | 22 | #include "ot_stats.h" |
| 24 | #include "ot_iovec.h" | 23 | #include "ot_sync.h" |
| 24 | #include "trackerlogic.h" | ||
| 25 | 25 | ||
| 26 | #ifdef WANT_SYNC_BATCH | 26 | #ifdef WANT_SYNC_BATCH |
| 27 | 27 | ||
| 28 | #define OT_SYNC_CHUNK_SIZE (512*1024) | 28 | #define OT_SYNC_CHUNK_SIZE (512 * 1024) |
| 29 | 29 | ||
| 30 | /* Import Changeset from an external authority | 30 | /* Import Changeset from an external authority |
| 31 | format: d4:syncd[..]ee | 31 | format: d4:syncd[..]ee |
| 32 | [..]: ( 20:01234567890abcdefghij16:XXXXYYYY )+ | 32 | [..]: ( 20:01234567890abcdefghij16:XXXXYYYY )+ |
| 33 | */ | 33 | */ |
| 34 | int add_changeset_to_tracker( uint8_t *data, size_t len ) { | 34 | int add_changeset_to_tracker(uint8_t *data, size_t len) { |
| 35 | ot_hash *hash; | 35 | ot_hash *hash; |
| 36 | uint8_t *end = data + len; | 36 | uint8_t *end = data + len; |
| 37 | unsigned long peer_count; | 37 | unsigned long peer_count; |
| 38 | 38 | ||
| 39 | /* We know that the string is \n terminated, so it can't | 39 | /* We know that the string is \n terminated, so it can't |
| 40 | overflow */ | 40 | overflow */ |
| 41 | if( byte_diff( data, 8, "d4:syncd" ) ) return -1; | 41 | if (byte_diff(data, 8, "d4:syncd")) |
| 42 | return -1; | ||
| 42 | data += 8; | 43 | data += 8; |
| 43 | 44 | ||
| 44 | while( 1 ) { | 45 | while (1) { |
| 45 | if( byte_diff( data, 3, "20:" ) ) { | 46 | if (byte_diff(data, 3, "20:")) { |
| 46 | if( byte_diff( data, 2, "ee" ) ) | 47 | if (byte_diff(data, 2, "ee")) |
| 47 | return -1; | 48 | return -1; |
| 48 | return 0; | 49 | return 0; |
| 49 | } | 50 | } |
| 50 | data += 3; | 51 | data += 3; |
| 51 | hash = (ot_hash*)data; | 52 | hash = (ot_hash *)data; |
| 52 | data += sizeof( ot_hash ); | 53 | data += sizeof(ot_hash); |
| 53 | 54 | ||
| 54 | /* Scan string length indicator */ | 55 | /* Scan string length indicator */ |
| 55 | data += ( len = scan_ulong( (char*)data, &peer_count ) ); | 56 | data += (len = scan_ulong((char *)data, &peer_count)); |
| 56 | 57 | ||
| 57 | /* If no long was scanned, it is not divisible by 8, it is not | 58 | /* If no long was scanned, it is not divisible by 8, it is not |
| 58 | followed by a colon or claims to need too much memory, we fail */ | 59 | followed by a colon or claims to need too much memory, we fail */ |
| 59 | if( !len || !peer_count || ( peer_count & 7 ) || ( *data++ != ':' ) || ( data + peer_count > end ) ) | 60 | if (!len || !peer_count || (peer_count & 7) || (*data++ != ':') || (data + peer_count > end)) |
| 60 | return -1; | 61 | return -1; |
| 61 | 62 | ||
| 62 | while( peer_count > 0 ) { | 63 | while (peer_count > 0) { |
| 63 | add_peer_to_torrent( hash, (ot_peer*)data, 1 ); | 64 | add_peer_to_torrent(hash, (ot_peer *)data, 1); |
| 64 | data += 8; peer_count -= 8; | 65 | data += 8; |
| 66 | peer_count -= 8; | ||
| 65 | } | 67 | } |
| 66 | } | 68 | } |
| 67 | return 0; | 69 | return 0; |
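
The changeset parsed above is the bencoded framing named in the header comment: an outer d4:syncd ... ee dictionary holding, per torrent, a 20-byte info_hash followed by a length-prefixed run of 8-byte compact peers. A minimal standalone sketch of walking that framing with only standard C (not part of this patch; the helper name and the printf are illustrative, and the buffer is assumed to be \n/NUL terminated as the comment above states):

/* Sketch only; not opentracker code. Walks "d4:syncd ( 20:<hash> <N>:<peers> )* ee". */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int walk_changeset(const uint8_t *data, size_t len) {
  const uint8_t *end = data + len;

  if (len < 10 || memcmp(data, "d4:syncd", 8))
    return -1;
  data += 8;

  while (end - data >= 2) {
    unsigned long peer_bytes;
    char         *next;

    if (!memcmp(data, "ee", 2))
      return 0;                                      /* dictionary closed cleanly */
    if ((size_t)(end - data) < 23 || memcmp(data, "20:", 3))
      return -1;
    data += 3 + 20;                                  /* skip "20:" and the info_hash */

    peer_bytes = strtoul((const char *)data, &next, 10);
    if (next == (const char *)data || *next != ':' || peer_bytes % 8)
      return -1;                                     /* peers come in 8-byte units */
    data = (const uint8_t *)next + 1;

    if (peer_bytes > (size_t)(end - data))
      return -1;
    printf("torrent with %lu compact peers\n", peer_bytes / 8);
    data += peer_bytes;
  }
  return -1;                                         /* ran past the buffer */
}
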
| @@ -70,80 +72,86 @@ int add_changeset_to_tracker( uint8_t *data, size_t len ) { | |||
| 70 | /* Proposed output format | 72 | /* Proposed output format |
| 71 | d4:syncd20:<info_hash>8*N:(xxxxyyyy)*Nee | 73 | d4:syncd20:<info_hash>8*N:(xxxxyyyy)*Nee |
| 72 | */ | 74 | */ |
| 73 | static void sync_make( int *iovec_entries, struct iovec **iovector ) { | 75 | static void sync_make(int *iovec_entries, struct iovec **iovector) { |
| 74 | int bucket; | 76 | int bucket; |
| 75 | char *r, *re; | 77 | char *r, *re; |
| 76 | 78 | ||
| 77 | /* Setup return vector... */ | 79 | /* Setup return vector... */ |
| 78 | *iovec_entries = 0; | 80 | *iovec_entries = 0; |
| 79 | *iovector = NULL; | 81 | *iovector = NULL; |
| 80 | if( !( r = iovec_increase( iovec_entries, iovector, OT_SYNC_CHUNK_SIZE ) ) ) | 82 | if (!(r = iovec_increase(iovec_entries, iovector, OT_SYNC_CHUNK_SIZE))) |
| 81 | return; | 83 | return; |
| 82 | 84 | ||
| 83 | /* ... and pointer to end of current output buffer. | 85 | /* ... and pointer to end of current output buffer. |
| 84 | This works as a low watermark */ | 86 | This works as a low watermark */ |
| 85 | re = r + OT_SYNC_CHUNK_SIZE; | 87 | re = r + OT_SYNC_CHUNK_SIZE; |
| 86 | 88 | ||
| 87 | memmove( r, "d4:syncd", 8 ); r += 8; | 89 | memmove(r, "d4:syncd", 8); |
| 90 | r += 8; | ||
| 88 | 91 | ||
| 89 | /* For each bucket... */ | 92 | /* For each bucket... */ |
| 90 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 93 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
| 91 | /* Get exclusive access to that bucket */ | 94 | /* Get exclusive access to that bucket */ |
| 92 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 95 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
| 93 | size_t tor_offset; | 96 | size_t tor_offset; |
| 94 | 97 | ||
| 95 | /* For each torrent in this bucket.. */ | 98 | /* For each torrent in this bucket.. */ |
| 96 | for( tor_offset=0; tor_offset<torrents_list->size; ++tor_offset ) { | 99 | for (tor_offset = 0; tor_offset < torrents_list->size; ++tor_offset) { |
| 97 | /* Address torrents members */ | 100 | /* Address torrents members */ |
| 98 | ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[tor_offset] ).peer_list; | 101 | ot_peerlist *peer_list = (((ot_torrent *)(torrents_list->data))[tor_offset]).peer_list; |
| 99 | ot_hash *hash =&( ((ot_torrent*)(torrents_list->data))[tor_offset] ).hash; | 102 | ot_hash *hash = &(((ot_torrent *)(torrents_list->data))[tor_offset]).hash; |
| 100 | const size_t byte_count = sizeof(ot_peer) * peer_list->changeset.size; | 103 | const size_t byte_count = sizeof(ot_peer) * peer_list->changeset.size; |
| 101 | 104 | ||
| 102 | /* If we reached our low watermark in buffer... */ | 105 | /* If we reached our low watermark in buffer... */ |
| 103 | if( re - r <= (ssize_t)(/* strlen( "20:" ) == */ 3 + sizeof( ot_hash ) + /* strlen_max( "%zd" ) == */ 12 + byte_count ) ) { | 106 | if (re - r <= (ssize_t)(/* strlen( "20:" ) == */ 3 + sizeof(ot_hash) + /* strlen_max( "%zd" ) == */ 12 + byte_count)) { |
| 104 | 107 | ||
| 105 | /* Allocate a fresh output buffer at the end of our buffers list | 108 | /* Allocate a fresh output buffer at the end of our buffers list |
| 106 | release bucket and return, if that fails */ | 109 | release bucket and return, if that fails */ |
| 107 | if( !( r = iovec_fix_increase_or_free( iovec_entries, iovector, r, OT_SYNC_CHUNK_SIZE ) ) ) | 110 | if (!(r = iovec_fix_increase_or_free(iovec_entries, iovector, r, OT_SYNC_CHUNK_SIZE))) |
| 108 | return mutex_bucket_unlock( bucket ); | 111 | return mutex_bucket_unlock(bucket); |
| 109 | 112 | ||
| 110 | /* Adjust new end of output buffer */ | 113 | /* Adjust new end of output buffer */ |
| 111 | re = r + OT_SYNC_CHUNK_SIZE; | 114 | re = r + OT_SYNC_CHUNK_SIZE; |
| 112 | } | 115 | } |
| 113 | 116 | ||
| 114 | *r++ = '2'; *r++ = '0'; *r++ = ':'; | 117 | *r++ = '2'; |
| 115 | memmove( r, hash, sizeof( ot_hash ) ); r += sizeof( ot_hash ); | 118 | *r++ = '0'; |
| 116 | r += sprintf( r, "%zd:", byte_count ); | 119 | *r++ = ':'; |
| 117 | memmove( r, peer_list->changeset.data, byte_count ); r += byte_count; | 120 | memmove(r, hash, sizeof(ot_hash)); |
| 121 | r += sizeof(ot_hash); | ||
| 122 | r += sprintf(r, "%zd:", byte_count); | ||
| 123 | memmove(r, peer_list->changeset.data, byte_count); | ||
| 124 | r += byte_count; | ||
| 118 | } | 125 | } |
| 119 | 126 | ||
| 120 | /* All torrents done: release lock on current bucket */ | 127 | /* All torrents done: release lock on current bucket */ |
| 121 | mutex_bucket_unlock( bucket ); | 128 | mutex_bucket_unlock(bucket); |
| 122 | } | 129 | } |
| 123 | 130 | ||
| 124 | /* Close bencoded sync dictionary */ | 131 | /* Close bencoded sync dictionary */ |
| 125 | *r++='e'; *r++='e'; | 132 | *r++ = 'e'; |
| 133 | *r++ = 'e'; | ||
| 126 | 134 | ||
| 127 | /* Release unused memory in current output buffer */ | 135 | /* Release unused memory in current output buffer */ |
| 128 | iovec_fixlast( iovec_entries, iovector, r ); | 136 | iovec_fixlast(iovec_entries, iovector, r); |
| 129 | } | 137 | } |
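
The chunking scheme in sync_make() above reserves a fixed-size output chunk, treats the distance to its end as a low watermark, and opens a fresh chunk as soon as the remaining space might not hold the next record. A reduced sketch of that pattern with plain malloc'd chunks (in the real code the chunk list is the iovec array managed through ot_iovec.c; the names and the record bound here are illustrative):

/* Sketch only; not opentracker code. Start from a zero-initialized chunk_writer. */
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#define CHUNK_SIZE (512 * 1024)
#define MAX_RECORD (3 + 20 + 12 + 8 * 1024) /* illustrative bound; records must stay below this */

typedef struct {
  char *chunks[64]; /* finished and current chunks, like the iovec entries */
  int   count;
  char *cur, *end;  /* write position and end of the current chunk */
} chunk_writer;

static int chunk_ensure(chunk_writer *w) {
  if (w->count && w->end - w->cur > (ptrdiff_t)MAX_RECORD)
    return 0; /* still above the low watermark, keep writing */
  if (w->count == 64 || !(w->cur = malloc(CHUNK_SIZE)))
    return -1;
  w->end                = w->cur + CHUNK_SIZE;
  w->chunks[w->count++] = w->cur;
  return 0;
}

static int chunk_append(chunk_writer *w, const void *data, size_t len) {
  if (chunk_ensure(w)) /* open a fresh chunk before space could run out */
    return -1;
  memcpy(w->cur, data, len);
  w->cur += len;
  return 0;
}
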
| 130 | 138 | ||
| 131 | /* This is the entry point into this worker thread | 139 | /* This is the entry point into this worker thread |
| 132 | It grabs tasks from mutex_tasklist and delivers results back | 140 | It grabs tasks from mutex_tasklist and delivers results back |
| 133 | */ | 141 | */ |
| 134 | static void * sync_worker( void * args) { | 142 | static void *sync_worker(void *args) { |
| 135 | int iovec_entries; | 143 | int iovec_entries; |
| 136 | struct iovec *iovector; | 144 | struct iovec *iovector; |
| 137 | 145 | ||
| 138 | args = args; | 146 | args = args; |
| 139 | 147 | ||
| 140 | while( 1 ) { | 148 | while (1) { |
| 141 | ot_tasktype tasktype = TASK_SYNC_OUT; | 149 | ot_tasktype tasktype = TASK_SYNC_OUT; |
| 142 | ot_taskid taskid = mutex_workqueue_poptask( &tasktype ); | 150 | ot_taskid taskid = mutex_workqueue_poptask(&tasktype); |
| 143 | sync_make( &iovec_entries, &iovector ); | 151 | sync_make(&iovec_entries, &iovector); |
| 144 | stats_issue_event( EVENT_SYNC_OUT, FLAG_TCP, iovec_length( &iovec_entries, &iovector) ); | 152 | stats_issue_event(EVENT_SYNC_OUT, FLAG_TCP, iovec_length(&iovec_entries, &iovector)); |
| 145 | if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) ) | 153 | if (mutex_workqueue_pushresult(taskid, iovec_entries, iovector)) |
| 146 | iovec_free( &iovec_entries, &iovector ); | 154 | iovec_free(&iovec_entries, &iovector); |
| 147 | } | 155 | } |
| 148 | return NULL; | 156 | return NULL; |
| 149 | } | 157 | } |
| @@ -162,5 +170,3 @@ void sync_deliver( int64 socket ) { | |||
| 162 | } | 170 | } |
| 163 | 171 | ||
| 164 | #endif | 172 | #endif |
| 165 | |||
| 166 | const char *g_version_sync_c = "$Source$: $Revision$\n"; | ||
| @@ -9,11 +9,11 @@ | |||
| 9 | #ifdef WANT_SYNC_BATCH | 9 | #ifdef WANT_SYNC_BATCH |
| 10 | enum { SYNC_IN, SYNC_OUT }; | 10 | enum { SYNC_IN, SYNC_OUT }; |
| 11 | 11 | ||
| 12 | void sync_init( ); | 12 | void sync_init(); |
| 13 | void sync_deinit( ); | 13 | void sync_deinit(); |
| 14 | void sync_deliver( int64 socket ); | 14 | void sync_deliver(int64 socket); |
| 15 | 15 | ||
| 16 | int add_changeset_to_tracker( uint8_t *data, size_t len ); | 16 | int add_changeset_to_tracker(uint8_t *data, size_t len); |
| 17 | #else | 17 | #else |
| 18 | 18 | ||
| 19 | #define sync_init() | 19 | #define sync_init() |
| @@ -4,30 +4,31 @@ | |||
| 4 | $id$ */ | 4 | $id$ */ |
| 5 | 5 | ||
| 6 | /* System */ | 6 | /* System */ |
| 7 | #include <stdlib.h> | ||
| 8 | #include <pthread.h> | ||
| 9 | #include <string.h> | ||
| 10 | #include <arpa/inet.h> | 7 | #include <arpa/inet.h> |
| 8 | #include <pthread.h> | ||
| 11 | #include <stdio.h> | 9 | #include <stdio.h> |
| 10 | #include <stdlib.h> | ||
| 11 | #include <string.h> | ||
| 12 | 12 | ||
| 13 | /* Libowfat */ | 13 | /* Libowfat */ |
| 14 | #include "socket.h" | ||
| 15 | #include "io.h" | 14 | #include "io.h" |
| 15 | #include "ip6.h" | ||
| 16 | #include "socket.h" | ||
| 16 | 17 | ||
| 17 | /* Opentracker */ | 18 | /* Opentracker */ |
| 18 | #include "trackerlogic.h" | ||
| 19 | #include "ot_udp.h" | ||
| 20 | #include "ot_stats.h" | ||
| 21 | #include "ot_rijndael.h" | 19 | #include "ot_rijndael.h" |
| 20 | #include "ot_stats.h" | ||
| 21 | #include "ot_udp.h" | ||
| 22 | #include "trackerlogic.h" | ||
| 22 | 23 | ||
| 23 | #if 0 | 24 | #if 0 |
| 24 | static const uint8_t g_static_connid[8] = { 0x23, 0x42, 0x05, 0x17, 0xde, 0x41, 0x50, 0xff }; | 25 | static const uint8_t g_static_connid[8] = { 0x23, 0x42, 0x05, 0x17, 0xde, 0x41, 0x50, 0xff }; |
| 25 | #endif | 26 | #endif |
| 26 | static uint32_t g_rijndael_round_key[44] = {0}; | 27 | static uint32_t g_rijndael_round_key[44] = {0}; |
| 27 | static uint32_t g_key_of_the_hour[2] = {0}; | 28 | static uint32_t g_key_of_the_hour[2] = {0}; |
| 28 | static ot_time g_hour_of_the_key; | 29 | static ot_time g_hour_of_the_key; |
| 29 | 30 | ||
| 30 | static void udp_generate_rijndael_round_key() { | 31 | static void udp_generate_rijndael_round_key() { |
| 31 | uint32_t key[16]; | 32 | uint32_t key[16]; |
| 32 | #ifdef WANT_ARC4RANDOM | 33 | #ifdef WANT_ARC4RANDOM |
| 33 | arc4random_buf(&key[0], sizeof(key)); | 34 | arc4random_buf(&key[0], sizeof(key)); |
| @@ -37,7 +38,7 @@ static void udp_generate_rijndael_round_key() { | |||
| 37 | key[2] = random(); | 38 | key[2] = random(); |
| 38 | key[3] = random(); | 39 | key[3] = random(); |
| 39 | #endif | 40 | #endif |
| 40 | rijndaelKeySetupEnc128( g_rijndael_round_key, (uint8_t*)key ); | 41 | rijndaelKeySetupEnc128(g_rijndael_round_key, (uint8_t *)key); |
| 41 | 42 | ||
| 42 | #ifdef WANT_ARC4RANDOM | 43 | #ifdef WANT_ARC4RANDOM |
| 43 | g_key_of_the_hour[0] = arc4random(); | 44 | g_key_of_the_hour[0] = arc4random(); |
| @@ -48,180 +49,188 @@ static void udp_generate_rijndael_round_key() { | |||
| 48 | } | 49 | } |
| 49 | 50 | ||
| 50 | /* Generate current and previous connection id for ip */ | 51 | /* Generate current and previous connection id for ip */ |
| 51 | static void udp_make_connectionid( uint32_t connid[2], const ot_ip6 remoteip, int age ) { | 52 | static void udp_make_connectionid(uint32_t connid[2], const ot_ip6 remoteip, int age) { |
| 52 | uint32_t plain[4], crypt[4]; | 53 | uint32_t plain[4], crypt[4]; |
| 53 | int i; | 54 | int i; |
| 54 | if( g_now_minutes + 60 > g_hour_of_the_key ) { | 55 | if (g_now_minutes + 60 > g_hour_of_the_key) { |
| 55 | g_hour_of_the_key = g_now_minutes; | 56 | g_hour_of_the_key = g_now_minutes; |
| 56 | g_key_of_the_hour[1] = g_key_of_the_hour[0]; | 57 | g_key_of_the_hour[1] = g_key_of_the_hour[0]; |
| 57 | #ifdef WANT_ARC4RANDOM | 58 | #ifdef WANT_ARC4RANDOM |
| 58 | g_key_of_the_hour[0] = arc4random(); | 59 | g_key_of_the_hour[0] = arc4random(); |
| 59 | #else | 60 | #else |
| 60 | g_key_of_the_hour[0] = random(); | 61 | g_key_of_the_hour[0] = random(); |
| 61 | #endif | 62 | #endif |
| 62 | } | 63 | } |
| 63 | 64 | ||
| 64 | memcpy( plain, remoteip, sizeof( plain ) ); | 65 | memcpy(plain, remoteip, sizeof(plain)); |
| 65 | for( i=0; i<4; ++i ) plain[i] ^= g_key_of_the_hour[age]; | 66 | for (i = 0; i < 4; ++i) |
| 66 | rijndaelEncrypt128( g_rijndael_round_key, (uint8_t*)remoteip, (uint8_t*)crypt ); | 67 | plain[i] ^= g_key_of_the_hour[age]; |
| 68 | rijndaelEncrypt128(g_rijndael_round_key, (uint8_t *)remoteip, (uint8_t *)crypt); | ||
| 67 | connid[0] = crypt[0] ^ crypt[1]; | 69 | connid[0] = crypt[0] ^ crypt[1]; |
| 68 | connid[1] = crypt[2] ^ crypt[3]; | 70 | connid[1] = crypt[2] ^ crypt[3]; |
| 69 | } | 71 | } |
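
udp_make_connectionid() above encrypts the remote address under a key that rotates hourly and folds the ciphertext into 64 bits; handle_udp6() below accepts ids derived from either the current or the previous hourly key, so a handed-out id stays usable for at least an hour without the tracker keeping per-client state. A toy sketch of that validity window (the mixing below stands in for the Rijndael step and is not the real derivation):

/* Sketch only; not opentracker code. */
#include <stdint.h>
#include <string.h>

typedef uint8_t ip6_addr[16];

static uint32_t g_hour_key[2]; /* [0] = key of this hour, [1] = previous hour's key */

static void make_connid(uint32_t connid[2], const ip6_addr ip, int age) {
  uint32_t words[4];
  memcpy(words, ip, sizeof(words));
  connid[0] = (words[0] ^ words[2]) + g_hour_key[age]; /* toy mix, not Rijndael */
  connid[1] = (words[1] ^ words[3]) ^ ~g_hour_key[age];
}

static int connid_is_valid(const uint32_t given[2], const ip6_addr ip) {
  uint32_t expect[2];
  int      age;
  for (age = 0; age <= 1; ++age) { /* try this hour's key, then last hour's */
    make_connid(expect, ip, age);
    if (!memcmp(given, expect, sizeof(expect)))
      return 1;
  }
  return 0;
}
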
| 70 | 72 | ||
| 71 | /* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */ | 73 | /* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */ |
| 72 | int handle_udp6( int64 serversocket, struct ot_workstruct *ws ) { | 74 | int handle_udp6(int64 serversocket, struct ot_workstruct *ws) { |
| 73 | ot_ip6 remoteip; | 75 | ot_ip6 remoteip; |
| 74 | uint32_t *inpacket = (uint32_t*)ws->inbuf; | 76 | uint32_t *inpacket = (uint32_t *)ws->inbuf; |
| 75 | uint32_t *outpacket = (uint32_t*)ws->outbuf; | 77 | uint32_t *outpacket = (uint32_t *)ws->outbuf; |
| 76 | uint32_t numwant, left, event, scopeid; | 78 | uint32_t left, event, scopeid; |
| 77 | uint32_t connid[2]; | 79 | uint32_t connid[2]; |
| 78 | uint32_t action; | 80 | uint32_t action; |
| 79 | uint16_t port, remoteport; | 81 | uint16_t port, remoteport; |
| 80 | size_t byte_count, scrape_count; | 82 | size_t byte_count, scrape_count; |
| 81 | 83 | ||
| 82 | byte_count = socket_recv6( serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid ); | 84 | byte_count = socket_recv6(serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid); |
| 83 | if( !byte_count ) return 0; | 85 | if (!byte_count) |
| 84 | 86 | return 0; | |
| 85 | stats_issue_event( EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip ); | 87 | |
| 86 | stats_issue_event( EVENT_READ, FLAG_UDP, byte_count ); | 88 | stats_issue_event(EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip); |
| 89 | stats_issue_event(EVENT_READ, FLAG_UDP, byte_count); | ||
| 87 | 90 | ||
| 88 | /* Minimum udp tracker packet size, also catches error */ | 91 | /* Minimum udp tracker packet size, also catches error */ |
| 89 | if( byte_count < 16 ) | 92 | if (byte_count < 16) |
| 90 | return 1; | 93 | return 1; |
| 91 | 94 | ||
| 92 | /* Get action to take. Ignore error messages and broken packets */ | 95 | /* Get action to take. Ignore error messages and broken packets */ |
| 93 | action = ntohl( inpacket[2] ); | 96 | action = ntohl(inpacket[2]); |
| 94 | if( action > 2 ) | 97 | if (action > 2) |
| 95 | return 1; | 98 | return 1; |
| 96 | 99 | ||
| 97 | /* Generate the connection id we give out to and expect back from | 100 | /* Generate the connection id we give out to and expect back from |
| 98 | the requesting ip address; this prevents udp spoofing */ | 101 | the requesting ip address; this prevents udp spoofing */ |
| 99 | udp_make_connectionid( connid, remoteip, 0 ); | 102 | udp_make_connectionid(connid, remoteip, 0); |
| 100 | 103 | ||
| 101 | /* Initialise hash pointer */ | 104 | /* Initialise hash pointer */ |
| 102 | ws->hash = NULL; | 105 | ws->hash = NULL; |
| 103 | ws->peer_id = NULL; | 106 | ws->peer_id = NULL; |
| 104 | 107 | ||
| 105 | /* If action is not 0 (connect), then we expect the derived | 108 | /* If action is not 0 (connect), then we expect the derived |
| 106 | connection id in the first 64 bits | 109 | connection id in the first 64 bits |
| 107 | if( ( action > 0 ) && ( inpacket[0] != connid[0] || inpacket[1] != connid[1] ) ) { | 110 | if ((action > 0) && (inpacket[0] != connid[0] || inpacket[1] != connid[1])) { |
| 108 | /* If connection id does not match, try the one that was | 111 | /* If connection id does not match, try the one that was |
| 109 | valid in the previous hour. Only if this also does not | 112 | valid in the previous hour. Only if this also does not |
| 110 | match, return an error packet */ | 113 | match, return an error packet */ |
| 111 | udp_make_connectionid( connid, remoteip, 1 ); | 114 | udp_make_connectionid(connid, remoteip, 1); |
| 112 | if( inpacket[0] != connid[0] || inpacket[1] != connid[1] ) { | 115 | if (inpacket[0] != connid[0] || inpacket[1] != connid[1]) { |
| 113 | const size_t s = sizeof( "Connection ID missmatch." ); | 116 | const size_t s = sizeof("Connection ID missmatch."); |
| 114 | outpacket[0] = htonl( 3 ); outpacket[1] = inpacket[3]; | 117 | outpacket[0] = htonl(3); |
| 115 | memcpy( &outpacket[2], "Connection ID missmatch.", s ); | 118 | outpacket[1] = inpacket[3]; |
| 116 | socket_send6( serversocket, ws->outbuf, 8 + s, remoteip, remoteport, 0 ); | 119 | memcpy(&outpacket[2], "Connection ID missmatch.", s); |
| 117 | stats_issue_event( EVENT_CONNID_MISSMATCH, FLAG_UDP, 8 + s ); | 120 | socket_send6(serversocket, ws->outbuf, 8 + s, remoteip, remoteport, 0); |
| 121 | stats_issue_event(EVENT_CONNID_MISSMATCH, FLAG_UDP, 8 + s); | ||
| 118 | return 1; | 122 | return 1; |
| 119 | } | 123 | } |
| 120 | } | 124 | } |
| 121 | 125 | ||
| 122 | switch( action ) { | 126 | switch (action) { |
| 123 | case 0: /* This is a connect action */ | 127 | case 0: /* This is a connect action */ |
| 124 | /* look for udp bittorrent magic id */ | 128 | /* look for udp bittorrent magic id */ |
| 125 | if( (ntohl(inpacket[0]) != 0x00000417) || (ntohl(inpacket[1]) != 0x27101980) ) | 129 | if ((ntohl(inpacket[0]) != 0x00000417) || (ntohl(inpacket[1]) != 0x27101980)) |
| 126 | return 1; | 130 | return 1; |
| 131 | |||
| 132 | outpacket[0] = 0; | ||
| 133 | outpacket[1] = inpacket[3]; | ||
| 134 | outpacket[2] = connid[0]; | ||
| 135 | outpacket[3] = connid[1]; | ||
| 136 | |||
| 137 | socket_send6(serversocket, ws->outbuf, 16, remoteip, remoteport, 0); | ||
| 138 | stats_issue_event(EVENT_CONNECT, FLAG_UDP, 16); | ||
| 139 | break; | ||
| 140 | case 1: /* This is an announce action */ | ||
| 141 | /* Minimum udp announce packet size */ | ||
| 142 | if (byte_count < 98) | ||
| 143 | return 1; | ||
| 144 | |||
| 145 | /* We do only want to know, if it is zero */ | ||
| 146 | left = inpacket[64 / 4] | inpacket[68 / 4]; | ||
| 127 | 147 | ||
| 128 | outpacket[0] = 0; | 148 | event = ntohl(inpacket[80 / 4]); |
| 129 | outpacket[1] = inpacket[3]; | 149 | port = *(uint16_t *)(((char *)inpacket) + 96); |
| 130 | outpacket[2] = connid[0]; | 150 | ws->hash = (ot_hash *)(((char *)inpacket) + 16); |
| 131 | outpacket[3] = connid[1]; | ||
| 132 | 151 | ||
| 133 | socket_send6( serversocket, ws->outbuf, 16, remoteip, remoteport, 0 ); | 152 | OT_SETIP(ws->peer, remoteip); |
| 134 | stats_issue_event( EVENT_CONNECT, FLAG_UDP, 16 ); | 153 | OT_SETPORT(ws->peer, &port); |
| 154 | OT_PEERFLAG(ws->peer) = 0; | ||
| 155 | |||
| 156 | switch (event) { | ||
| 157 | case 1: | ||
| 158 | OT_PEERFLAG(ws->peer) |= PEER_FLAG_COMPLETED; | ||
| 159 | break; | ||
| 160 | case 3: | ||
| 161 | OT_PEERFLAG(ws->peer) |= PEER_FLAG_STOPPED; | ||
| 135 | break; | 162 | break; |
| 136 | case 1: /* This is an announce action */ | 163 | default: |
| 137 | /* Minimum udp announce packet size */ | ||
| 138 | if( byte_count < 98 ) | ||
| 139 | return 1; | ||
| 140 | |||
| 141 | /* We do only want to know, if it is zero */ | ||
| 142 | left = inpacket[64/4] | inpacket[68/4]; | ||
| 143 | |||
| 144 | /* Limit amount of peers to 200 */ | ||
| 145 | numwant = ntohl( inpacket[92/4] ); | ||
| 146 | if (numwant > 200) numwant = 200; | ||
| 147 | |||
| 148 | event = ntohl( inpacket[80/4] ); | ||
| 149 | port = *(uint16_t*)( ((char*)inpacket) + 96 ); | ||
| 150 | ws->hash = (ot_hash*)( ((char*)inpacket) + 16 ); | ||
| 151 | |||
| 152 | OT_SETIP( &ws->peer, remoteip ); | ||
| 153 | OT_SETPORT( &ws->peer, &port ); | ||
| 154 | OT_PEERFLAG( &ws->peer ) = 0; | ||
| 155 | |||
| 156 | switch( event ) { | ||
| 157 | case 1: OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_COMPLETED; break; | ||
| 158 | case 3: OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_STOPPED; break; | ||
| 159 | default: break; | ||
| 160 | } | ||
| 161 | |||
| 162 | if( !left ) | ||
| 163 | OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_SEEDING; | ||
| 164 | |||
| 165 | outpacket[0] = htonl( 1 ); /* announce action */ | ||
| 166 | outpacket[1] = inpacket[12/4]; | ||
| 167 | |||
| 168 | if( OT_PEERFLAG( &ws->peer ) & PEER_FLAG_STOPPED ) { /* Peer is gone. */ | ||
| 169 | ws->reply = ws->outbuf; | ||
| 170 | ws->reply_size = remove_peer_from_torrent( FLAG_UDP, ws ); | ||
| 171 | } else { | ||
| 172 | ws->reply = ws->outbuf + 8; | ||
| 173 | ws->reply_size = 8 + add_peer_to_torrent_and_return_peers( FLAG_UDP, ws, numwant ); | ||
| 174 | } | ||
| 175 | |||
| 176 | socket_send6( serversocket, ws->outbuf, ws->reply_size, remoteip, remoteport, 0 ); | ||
| 177 | stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, ws->reply_size ); | ||
| 178 | break; | 164 | break; |
| 165 | } | ||
| 179 | 166 | ||
| 180 | case 2: /* This is a scrape action */ | 167 | if (!left) |
| 181 | outpacket[0] = htonl( 2 ); /* scrape action */ | 168 | OT_PEERFLAG(ws->peer) |= PEER_FLAG_SEEDING; |
| 182 | outpacket[1] = inpacket[12/4]; | ||
| 183 | 169 | ||
| 184 | for( scrape_count = 0; ( scrape_count * 20 < byte_count - 16) && ( scrape_count <= 74 ); scrape_count++ ) | 170 | outpacket[0] = htonl(1); /* announce action */ |
| 185 | return_udp_scrape_for_torrent( *(ot_hash*)( ((char*)inpacket) + 16 + 20 * scrape_count ), ((char*)outpacket) + 8 + 12 * scrape_count ); | 171 | outpacket[1] = inpacket[12 / 4]; |
| 186 | 172 | ||
| 187 | socket_send6( serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0 ); | 173 | if (OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED) { /* Peer is gone. */ |
| 188 | stats_issue_event( EVENT_SCRAPE, FLAG_UDP, scrape_count ); | 174 | ws->reply = ws->outbuf; |
| 189 | break; | 175 | ws->reply_size = remove_peer_from_torrent(FLAG_UDP, ws); |
| 176 | } else { | ||
| 177 | /* Limit amount of peers to OT_MAX_PEERS_UDP */ | ||
| 178 | uint32_t numwant = ntohl(inpacket[92 / 4]); | ||
| 179 | size_t max_peers = ip6_isv4mapped(remoteip) ? OT_MAX_PEERS_UDP4 : OT_MAX_PEERS_UDP6; | ||
| 180 | if (numwant > max_peers) | ||
| 181 | numwant = max_peers; | ||
| 182 | |||
| 183 | ws->reply = ws->outbuf + 8; | ||
| 184 | ws->reply_size = 8 + add_peer_to_torrent_and_return_peers(FLAG_UDP, ws, numwant); | ||
| 185 | } | ||
| 186 | |||
| 187 | socket_send6(serversocket, ws->outbuf, ws->reply_size, remoteip, remoteport, 0); | ||
| 188 | stats_issue_event(EVENT_ANNOUNCE, FLAG_UDP, ws->reply_size); | ||
| 189 | break; | ||
| 190 | |||
| 191 | case 2: /* This is a scrape action */ | ||
| 192 | outpacket[0] = htonl(2); /* scrape action */ | ||
| 193 | outpacket[1] = inpacket[12 / 4]; | ||
| 194 | |||
| 195 | for (scrape_count = 0; (scrape_count * 20 < byte_count - 16) && (scrape_count <= 74); scrape_count++) | ||
| 196 | return_udp_scrape_for_torrent(*(ot_hash *)(((char *)inpacket) + 16 + 20 * scrape_count), ((char *)outpacket) + 8 + 12 * scrape_count); | ||
| 197 | |||
| 198 | socket_send6(serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0); | ||
| 199 | stats_issue_event(EVENT_SCRAPE, FLAG_UDP, scrape_count); | ||
| 200 | break; | ||
| 190 | } | 201 | } |
| 191 | return 1; | 202 | return 1; |
| 192 | } | 203 | } |
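
handle_udp6() above follows the UDP tracker protocol: a connect request carries the 64-bit protocol magic and action 0, and the tracker answers with the connection id expected in later announce and scrape packets. For reference, the matching client-side connect request looks like this (a sketch, not part of this patch; all fields are big-endian, and the magic splits into the two 32-bit words checked above):

/* Sketch only; not opentracker code. */
#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static size_t build_connect_request(uint8_t out[16], uint32_t transaction_id) {
  uint32_t words[4];
  words[0] = htonl(0x00000417);     /* protocol magic, high word           */
  words[1] = htonl(0x27101980);     /* protocol magic, low word            */
  words[2] = htonl(0);              /* action 0 = connect                  */
  words[3] = htonl(transaction_id); /* echoed back in the tracker's reply  */
  memcpy(out, words, sizeof(words));
  return sizeof(words);             /* 16 bytes on the wire */
}
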
| 193 | 204 | ||
| 194 | static void* udp_worker( void * args ) { | 205 | static void *udp_worker(void *args) { |
| 195 | int64 sock = (int64)args; | 206 | int64 sock = (int64)args; |
| 196 | struct ot_workstruct ws; | 207 | struct ot_workstruct ws; |
| 197 | memset( &ws, 0, sizeof(ws) ); | 208 | memset(&ws, 0, sizeof(ws)); |
| 198 | 209 | ||
| 199 | ws.inbuf=malloc(G_INBUF_SIZE); | 210 | ws.inbuf = malloc(G_INBUF_SIZE); |
| 200 | ws.outbuf=malloc(G_OUTBUF_SIZE); | 211 | ws.outbuf = malloc(G_OUTBUF_SIZE); |
| 201 | #ifdef _DEBUG_HTTPERROR | 212 | #ifdef _DEBUG_HTTPERROR |
| 202 | ws.debugbuf=malloc(G_DEBUGBUF_SIZE); | 213 | ws.debugbuf = malloc(G_DEBUGBUF_SIZE); |
| 203 | #endif | 214 | #endif |
| 204 | 215 | ||
| 205 | while( g_opentracker_running ) | 216 | while (g_opentracker_running) |
| 206 | handle_udp6( sock, &ws ); | 217 | handle_udp6(sock, &ws); |
| 207 | 218 | ||
| 208 | free( ws.inbuf ); | 219 | free(ws.inbuf); |
| 209 | free( ws.outbuf ); | 220 | free(ws.outbuf); |
| 210 | #ifdef _DEBUG_HTTPERROR | 221 | #ifdef _DEBUG_HTTPERROR |
| 211 | free( ws.debugbuf ); | 222 | free(ws.debugbuf); |
| 212 | #endif | 223 | #endif |
| 213 | return NULL; | 224 | return NULL; |
| 214 | } | 225 | } |
| 215 | 226 | ||
| 216 | void udp_init( int64 sock, unsigned int worker_count ) { | 227 | void udp_init(int64 sock, unsigned int worker_count) { |
| 217 | pthread_t thread_id; | 228 | pthread_t thread_id; |
| 218 | if( !g_rijndael_round_key[0] ) | 229 | if (!g_rijndael_round_key[0]) |
| 219 | udp_generate_rijndael_round_key(); | 230 | udp_generate_rijndael_round_key(); |
| 220 | #ifdef _DEBUG | 231 | #ifdef _DEBUG |
| 221 | fprintf( stderr, " installing %d workers on udp socket %ld\n", worker_count, (unsigned long)sock ); | 232 | fprintf(stderr, " installing %d workers on udp socket %ld\n", worker_count, (unsigned long)sock); |
| 222 | #endif | 233 | #endif |
| 223 | while( worker_count-- ) | 234 | while (worker_count--) |
| 224 | pthread_create( &thread_id, NULL, udp_worker, (void *)sock ); | 235 | pthread_create(&thread_id, NULL, udp_worker, (void *)sock); |
| 225 | } | 236 | } |
| 226 | |||
| 227 | const char *g_version_udp_c = "$Source$: $Revision$\n"; | ||
| @@ -6,7 +6,7 @@ | |||
| 6 | #ifndef OT_UDP_H__ | 6 | #ifndef OT_UDP_H__ |
| 7 | #define OT_UDP_H__ | 7 | #define OT_UDP_H__ |
| 8 | 8 | ||
| 9 | void udp_init( int64 sock, unsigned int worker_count ); | 9 | void udp_init(int64 sock, unsigned int worker_count); |
| 10 | int handle_udp6( int64 serversocket, struct ot_workstruct *ws ); | 10 | int handle_udp6(int64 serversocket, struct ot_workstruct *ws); |
| 11 | 11 | ||
| 12 | #endif | 12 | #endif |
diff --git a/ot_vector.c b/ot_vector.c index 2a632b2..2acfbef 100644 --- a/ot_vector.c +++ b/ot_vector.c | |||
| @@ -4,39 +4,37 @@ | |||
| 4 | $id$ */ | 4 | $id$ */ |
| 5 | 5 | ||
| 6 | /* System */ | 6 | /* System */ |
| 7 | #include <stddef.h> | ||
| 8 | #include <stdint.h> | ||
| 7 | #include <stdlib.h> | 9 | #include <stdlib.h> |
| 8 | #include <string.h> | 10 | #include <string.h> |
| 9 | #include <strings.h> | 11 | #include <strings.h> |
| 10 | #include <stdint.h> | ||
| 11 | 12 | ||
| 12 | /* Opentracker */ | 13 | /* Opentracker */ |
| 13 | #include "trackerlogic.h" | 14 | #include "trackerlogic.h" |
| 14 | #include "ot_vector.h" | ||
| 15 | 15 | ||
| 16 | /* Libowfat */ | 16 | /* Libowfat */ |
| 17 | #include "uint32.h" | ||
| 18 | #include "uint16.h" | 17 | #include "uint16.h" |
| 18 | #include "uint32.h" | ||
| 19 | 19 | ||
| 20 | static int vector_compare_peer(const void *peer1, const void *peer2 ) { | 20 | static int vector_compare_peer6(const void *peer1, const void *peer2) { return memcmp(peer1, peer2, OT_PEER_COMPARE_SIZE6); } |
| 21 | return memcmp( peer1, peer2, OT_PEER_COMPARE_SIZE ); | 21 | static int vector_compare_peer4(const void *peer1, const void *peer2) { return memcmp(peer1, peer2, OT_PEER_COMPARE_SIZE4); } |
| 22 | } | ||
| 23 | 22 | ||
| 24 | /* This function gives us a binary search that returns a pointer, even if | 23 | /* This function gives us a binary search that returns a pointer, even if |
| 25 | no exact match is found. In that case it sets exactmatch 0 and gives | 24 | no exact match is found. In that case it sets exactmatch 0 and gives |
| 26 | calling functions the chance to insert data | 25 | calling functions the chance to insert data |
| 27 | */ | 26 | */ |
| 28 | void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size, | 27 | void *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch) { |
| 29 | size_t compare_size, int *exactmatch ) { | ||
| 30 | size_t interval = member_count; | 28 | size_t interval = member_count; |
| 31 | 29 | ||
| 32 | while( interval ) { | 30 | while (interval) { |
| 33 | uint8_t *lookat = ((uint8_t*)base) + member_size * ( interval / 2 ); | 31 | uint8_t *lookat = ((uint8_t *)base) + member_size * (interval / 2); |
| 34 | int cmp = memcmp( lookat, key, compare_size ); | 32 | int cmp = memcmp(lookat, key, compare_size); |
| 35 | if(cmp == 0 ) { | 33 | if (cmp == 0) { |
| 36 | base = lookat; | 34 | base = lookat; |
| 37 | break; | 35 | break; |
| 38 | } | 36 | } |
| 39 | if(cmp < 0) { | 37 | if (cmp < 0) { |
| 40 | base = lookat + member_size; | 38 | base = lookat + member_size; |
| 41 | interval--; | 39 | interval--; |
| 42 | } | 40 | } |
| @@ -44,13 +42,14 @@ void *binary_search( const void * const key, const void * base, const size_t mem | |||
| 44 | } | 42 | } |
| 45 | 43 | ||
| 46 | *exactmatch = interval; | 44 | *exactmatch = interval; |
| 47 | return (void*)base; | 45 | return (void *)base; |
| 48 | } | 46 | } |
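
binary_search() above deliberately returns a pointer even on a miss: with exactmatch set to 0 it points at the position where the key would keep the array sorted, which is what vector_find_or_insert() relies on. A small usage sketch against the prototype from ot_vector.h (the entry type is made up; keys compare bytewise, and the array must already have room for one more element):

/* Sketch only; not opentracker code. */
#include <stdint.h>
#include <string.h>

void *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch);

typedef struct {
  uint8_t  key[4]; /* compared bytewise, like ot_hash / ot_peer prefixes */
  uint32_t value;
} entry;

static void insert_sorted(entry *arr, size_t *count, const entry *item) {
  int    exact;
  entry *pos = binary_search(item, arr, *count, sizeof(entry), sizeof(item->key), &exact);

  if (exact)
    return; /* key already present */
  memmove(pos + 1, pos, (arr + *count - pos) * sizeof(entry)); /* shift the tail up */
  *pos = *item;
  (*count)++;
}
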
| 49 | 47 | ||
| 50 | static uint8_t vector_hash_peer( ot_peer *peer, int bucket_count ) { | 48 | static uint8_t vector_hash_peer(ot_peer const *peer, size_t compare_size, int bucket_count) { |
| 51 | unsigned int hash = 5381, i = OT_PEER_COMPARE_SIZE; | 49 | unsigned int hash = 5381; |
| 52 | uint8_t *p = (uint8_t*)peer; | 50 | uint8_t *p = (uint8_t *)peer; |
| 53 | while( i-- ) hash += (hash<<5) + *(p++); | 51 | while (compare_size--) |
| 52 | hash += (hash << 5) + *(p++); | ||
| 54 | return hash % bucket_count; | 53 | return hash % bucket_count; |
| 55 | } | 54 | } |
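
vector_hash_peer() above is a djb2-style byte hash over the comparable prefix of a peer, reduced modulo the bucket count, so peers spread evenly across the sub-vectors once a torrent grows buckets. Restated standalone for clarity (a sketch; the tracker's own version is the function above):

/* Sketch only; not opentracker code. */
#include <stddef.h>
#include <stdint.h>

static unsigned int bucket_for(const uint8_t *peer, size_t compare_size, unsigned int bucket_count) {
  unsigned int hash = 5381;
  while (compare_size--)
    hash += (hash << 5) + *peer++;
  return hash % bucket_count;
}
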
| 56 | 55 | ||
| @@ -61,48 +60,65 @@ static uint8_t vector_hash_peer( ot_peer *peer, int bucket_count ) { | |||
| 61 | if it wasn't found in vector. Caller needs to check the passed "exactmatch" variable to see, whether an insert | 60 | if it wasn't found in vector. Caller needs to check the passed "exactmatch" variable to see, whether an insert |
| 62 | took place. If resizing the vector failed, NULL is returned, else the pointer to the object in vector. | 61 | took place. If resizing the vector failed, NULL is returned, else the pointer to the object in vector. |
| 63 | */ | 62 | */ |
| 64 | void *vector_find_or_insert( ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch ) { | 63 | void *vector_find_or_insert(ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch) { |
| 65 | uint8_t *match = binary_search( key, vector->data, vector->size, member_size, compare_size, exactmatch ); | 64 | uint8_t *match = binary_search(key, vector->data, vector->size, member_size, compare_size, exactmatch); |
| 65 | |||
| 66 | if (*exactmatch) | ||
| 67 | return match; | ||
| 68 | |||
| 69 | if (vector->size + 1 > vector->space) { | ||
| 70 | size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS; | ||
| 71 | ptrdiff_t match_off = match - (uint8_t *)vector->data; | ||
| 72 | uint8_t *new_data = realloc(vector->data, new_space * member_size); | ||
| 66 | 73 | ||
| 67 | if( *exactmatch ) return match; | 74 | if (!new_data) |
| 75 | return NULL; | ||
| 68 | 76 | ||
| 69 | if( vector->size + 1 > vector->space ) { | ||
| 70 | size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS; | ||
| 71 | uint8_t *new_data = realloc( vector->data, new_space * member_size ); | ||
| 72 | if( !new_data ) return NULL; | ||
| 73 | /* Adjust pointer if it moved by realloc */ | 77 | /* Adjust pointer if it moved by realloc */ |
| 74 | match = new_data + (match - (uint8_t*)vector->data); | 78 | match = new_data + match_off; |
| 75 | 79 | ||
| 76 | vector->data = new_data; | 80 | vector->data = new_data; |
| 77 | vector->space = new_space; | 81 | vector->space = new_space; |
| 78 | } | 82 | } |
| 79 | memmove( match + member_size, match, ((uint8_t*)vector->data) + member_size * vector->size - match ); | 83 | memmove(match + member_size, match, ((uint8_t *)vector->data) + member_size * vector->size - match); |
| 80 | 84 | ||
| 81 | vector->size++; | 85 | vector->size++; |
| 82 | return match; | 86 | return match; |
| 83 | } | 87 | } |
| 84 | 88 | ||
| 85 | ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer *peer, int *exactmatch ) { | 89 | ot_peer *vector_find_or_insert_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch) { |
| 86 | ot_peer *match; | 90 | ot_peer *match, *end; |
| 91 | const size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); | ||
| 92 | size_t match_to_end; | ||
| 87 | 93 | ||
| 88 | /* If space is zero but size is set, we're dealing with a list of vector->size buckets */ | 94 | /* If space is zero but size is set, we're dealing with a list of vector->size buckets */ |
| 89 | if( vector->space < vector->size ) | 95 | if (vector->space < vector->size) |
| 90 | vector = ((ot_vector*)vector->data) + vector_hash_peer(peer, vector->size ); | 96 | vector = ((ot_vector *)vector->data) + vector_hash_peer(peer, compare_size, vector->size); |
| 91 | match = (ot_peer*)binary_search( peer, vector->data, vector->size, sizeof(ot_peer), OT_PEER_COMPARE_SIZE, exactmatch ); | 97 | match = binary_search(peer, vector->data, vector->size, peer_size, compare_size, exactmatch); |
| 92 | 98 | ||
| 93 | if( *exactmatch ) return match; | 99 | if (*exactmatch) |
| 100 | return match; | ||
| 94 | 101 | ||
| 95 | if( vector->size + 1 > vector->space ) { | 102 | /* This is the amount of bytes that needs to be pushed backwards by peer_size bytes to make room for new peer */ |
| 96 | size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS; | 103 | end = (ot_peer *)vector->data + vector->size * peer_size; |
| 97 | ot_peer *new_data = realloc( vector->data, new_space * sizeof(ot_peer) ); | 104 | match_to_end = end - match; |
| 98 | if( !new_data ) return NULL; | 105 | |
| 106 | if (vector->size + 1 > vector->space) { | ||
| 107 | ptrdiff_t offset = match - (ot_peer *)vector->data; | ||
| 108 | size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS; | ||
| 109 | ot_peer *new_data = realloc(vector->data, new_space * peer_size); | ||
| 110 | |||
| 111 | if (!new_data) | ||
| 112 | return NULL; | ||
| 99 | /* Adjust pointer if it moved by realloc */ | 113 | /* Adjust pointer if it moved by realloc */ |
| 100 | match = new_data + (match - (ot_peer*)vector->data); | 114 | match = new_data + offset; |
| 101 | 115 | ||
| 102 | vector->data = new_data; | 116 | vector->data = new_data; |
| 103 | vector->space = new_space; | 117 | vector->space = new_space; |
| 104 | } | 118 | } |
| 105 | memmove( match + 1, match, sizeof(ot_peer) * ( ((ot_peer*)vector->data) + vector->size - match ) ); | 119 | |
| 120 | /* Here we're guaranteed to have enough space in vector to move the block of peers after insertion point */ | ||
| 121 | memmove(match + peer_size, match, match_to_end); | ||
| 106 | 122 | ||
| 107 | vector->size++; | 123 | vector->size++; |
| 108 | return match; | 124 | return match; |
| @@ -113,126 +129,134 @@ ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer *peer, int *exac | |||
| 113 | 1 if a non-seeding peer was removed | 129 | 1 if a non-seeding peer was removed |
| 114 | 2 if a seeding peer was removed | 130 | 2 if a seeding peer was removed |
| 115 | */ | 131 | */ |
| 116 | int vector_remove_peer( ot_vector *vector, ot_peer *peer ) { | 132 | int vector_remove_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size) { |
| 117 | int exactmatch; | 133 | int exactmatch, was_seeder; |
| 118 | ot_peer *match, *end; | 134 | ot_peer *match, *end; |
| 135 | size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); | ||
| 119 | 136 | ||
| 120 | if( !vector->size ) return 0; | 137 | if (!vector->size) |
| 138 | return 0; | ||
| 121 | 139 | ||
| 122 | /* If space is zero but size is set, we're dealing with a list of vector->size buckets */ | 140 | /* If space is zero but size is set, we're dealing with a list of vector->size buckets */ |
| 123 | if( vector->space < vector->size ) | 141 | if (vector->space < vector->size) |
| 124 | vector = ((ot_vector*)vector->data) + vector_hash_peer(peer, vector->size ); | 142 | vector = ((ot_vector *)vector->data) + vector_hash_peer(peer, compare_size, vector->size); |
| 125 | 143 | ||
| 126 | end = ((ot_peer*)vector->data) + vector->size; | 144 | end = ((ot_peer *)vector->data) + peer_size * vector->size; |
| 127 | match = (ot_peer*)binary_search( peer, vector->data, vector->size, sizeof(ot_peer), OT_PEER_COMPARE_SIZE, &exactmatch ); | 145 | match = (ot_peer *)binary_search(peer, vector->data, vector->size, peer_size, compare_size, &exactmatch); |
| 128 | if( !exactmatch ) return 0; | 146 | if (!exactmatch) |
| 147 | return 0; | ||
| 129 | 148 | ||
| 130 | exactmatch = ( OT_PEERFLAG( match ) & PEER_FLAG_SEEDING ) ? 2 : 1; | 149 | was_seeder = (OT_PEERFLAG_D(match, peer_size) & PEER_FLAG_SEEDING) ? 2 : 1; |
| 131 | memmove( match, match + 1, sizeof(ot_peer) * ( end - match - 1 ) ); | 150 | memmove(match, match + peer_size, end - match - peer_size); |
| 132 | 151 | ||
| 133 | vector->size--; | 152 | vector->size--; |
| 134 | vector_fixup_peers( vector ); | 153 | vector_fixup_peers(vector, peer_size); |
| 135 | return exactmatch; | 154 | return was_seeder; |
| 136 | } | 155 | } |
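
With IPv4 and IPv6 peers now kept in separate lists, the element size becomes a runtime parameter, which is why the functions above index and memmove in strides of peer_size instead of sizeof(ot_peer). A compact sketch of that addressing (illustrative names, mirroring the removal memmove in vector_remove_peer() above):

/* Sketch only; not opentracker code. */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint8_t *nth_peer(void *data, size_t peer_size, size_t index) { return (uint8_t *)data + index * peer_size; }

/* Drop element `index` from a packed array of `count` elements; returns the new count. */
static size_t remove_nth_peer(void *data, size_t peer_size, size_t count, size_t index) {
  uint8_t *at = nth_peer(data, peer_size, index);

  memmove(at, at + peer_size, (count - index - 1) * peer_size);
  return count - 1;
}
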
| 137 | 156 | ||
| 138 | void vector_remove_torrent( ot_vector *vector, ot_torrent *match ) { | 157 | void vector_remove_torrent(ot_vector *vector, ot_torrent *match) { |
| 139 | ot_torrent *end = ((ot_torrent*)vector->data) + vector->size; | 158 | ot_torrent *end = ((ot_torrent *)vector->data) + vector->size; |
| 140 | 159 | ||
| 141 | if( !vector->size ) return; | 160 | if (!vector->size) |
| 161 | return; | ||
| 142 | 162 | ||
| 143 | /* If this is being called after an unsuccessful malloc() for peer_list | 163 | /* If this is being called after an unsuccessful malloc() for peer_list |
| 144 | in add_peer_to_torrent, match->peer_list actually might be NULL */ | 164 | in add_peer_to_torrent, match->peer_list actually might be NULL */ |
| 145 | if( match->peer_list) free_peerlist( match->peer_list ); | 165 | free_peerlist(match->peer_list6); |
| 166 | free_peerlist(match->peer_list4); | ||
| 146 | 167 | ||
| 147 | memmove( match, match + 1, sizeof(ot_torrent) * ( end - match - 1 ) ); | 168 | memmove(match, match + 1, sizeof(ot_torrent) * (end - match - 1)); |
| 148 | if( ( --vector->size * OT_VECTOR_SHRINK_THRESH < vector->space ) && ( vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS ) ) { | 169 | if ((--vector->size * OT_VECTOR_SHRINK_THRESH < vector->space) && (vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS)) { |
| 149 | vector->space /= OT_VECTOR_SHRINK_RATIO; | 170 | vector->space /= OT_VECTOR_SHRINK_RATIO; |
| 150 | vector->data = realloc( vector->data, vector->space * sizeof( ot_torrent ) ); | 171 | vector->data = realloc(vector->data, vector->space * sizeof(ot_torrent)); |
| 151 | } | 172 | } |
| 152 | } | 173 | } |
| 153 | 174 | ||
| 154 | void vector_clean_list( ot_vector * vector, int num_buckets ) { | 175 | void vector_clean_list(ot_vector *vector, int num_buckets) { |
| 155 | while( num_buckets-- ) | 176 | while (num_buckets--) |
| 156 | free( vector[num_buckets].data ); | 177 | free(vector[num_buckets].data); |
| 157 | free( vector ); | 178 | free(vector); |
| 158 | return; | 179 | return; |
| 159 | } | 180 | } |
| 160 | 181 | ||
| 161 | void vector_redistribute_buckets( ot_peerlist * peer_list ) { | 182 | void vector_redistribute_buckets(ot_peerlist *peer_list, size_t peer_size) { |
| 162 | int tmp, bucket, bucket_size_new, num_buckets_new, num_buckets_old = 1; | 183 | int tmp, bucket, bucket_size_new, num_buckets_new, num_buckets_old = 1; |
| 163 | ot_vector * bucket_list_new, * bucket_list_old = &peer_list->peers; | 184 | ot_vector *bucket_list_new, *bucket_list_old = &peer_list->peers; |
| 185 | int (*sort_func)(const void *, const void *) = peer_size == OT_PEER_SIZE6 ? &vector_compare_peer6 : &vector_compare_peer4; | ||
| 164 | 186 | ||
| 165 | if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { | 187 | if (OT_PEERLIST_HASBUCKETS(peer_list)) { |
| 166 | num_buckets_old = peer_list->peers.size; | 188 | num_buckets_old = peer_list->peers.size; |
| 167 | bucket_list_old = peer_list->peers.data; | 189 | bucket_list_old = peer_list->peers.data; |
| 168 | } | 190 | } |
| 169 | 191 | ||
| 170 | if( peer_list->peer_count < 255 ) | 192 | if (peer_list->peer_count < 255) |
| 171 | num_buckets_new = 1; | 193 | num_buckets_new = 1; |
| 172 | else if( peer_list->peer_count > 8192 ) | 194 | else if (peer_list->peer_count > 8192) |
| 173 | num_buckets_new = 64; | 195 | num_buckets_new = 64; |
| 174 | else if( peer_list->peer_count >= 512 && peer_list->peer_count < 4096 ) | 196 | else if (peer_list->peer_count >= 512 && peer_list->peer_count < 4096) |
| 175 | num_buckets_new = 16; | 197 | num_buckets_new = 16; |
| 176 | else if( peer_list->peer_count < 512 && num_buckets_old <= 16 ) | 198 | else if (peer_list->peer_count < 512 && num_buckets_old <= 16) |
| 177 | num_buckets_new = num_buckets_old; | 199 | num_buckets_new = num_buckets_old; |
| 178 | else if( peer_list->peer_count < 512 ) | 200 | else if (peer_list->peer_count < 512) |
| 179 | num_buckets_new = 1; | 201 | num_buckets_new = 1; |
| 180 | else if( peer_list->peer_count < 8192 && num_buckets_old > 1 ) | 202 | else if (peer_list->peer_count < 8192 && num_buckets_old > 1) |
| 181 | num_buckets_new = num_buckets_old; | 203 | num_buckets_new = num_buckets_old; |
| 182 | else | 204 | else |
| 183 | num_buckets_new = 16; | 205 | num_buckets_new = 16; |
| 184 | 206 | ||
| 185 | if( num_buckets_new == num_buckets_old ) | 207 | if (num_buckets_new == num_buckets_old) |
| 186 | return; | 208 | return; |
| 187 | 209 | ||
| 188 | /* Assume near perfect distribution */ | 210 | /* Assume near perfect distribution */ |
| 189 | bucket_list_new = malloc( num_buckets_new * sizeof( ot_vector ) ); | 211 | bucket_list_new = malloc(num_buckets_new * sizeof(ot_vector)); |
| 190 | if( !bucket_list_new) return; | 212 | if (!bucket_list_new) |
| 191 | bzero( bucket_list_new, num_buckets_new * sizeof( ot_vector ) ); | 213 | return; |
| 214 | bzero(bucket_list_new, num_buckets_new * sizeof(ot_vector)); | ||
| 192 | 215 | ||
| 193 | tmp = peer_list->peer_count / num_buckets_new; | 216 | tmp = peer_list->peer_count / num_buckets_new; |
| 194 | bucket_size_new = OT_VECTOR_MIN_MEMBERS; | 217 | bucket_size_new = OT_VECTOR_MIN_MEMBERS; |
| 195 | while( bucket_size_new < tmp) | 218 | while (bucket_size_new < tmp) |
| 196 | bucket_size_new *= OT_VECTOR_GROW_RATIO; | 219 | bucket_size_new *= OT_VECTOR_GROW_RATIO; |
| 197 | 220 | ||
| 198 | /* preallocate vectors to hold all peers */ | 221 | /* preallocate vectors to hold all peers */ |
| 199 | for( bucket=0; bucket<num_buckets_new; ++bucket ) { | 222 | for (bucket = 0; bucket < num_buckets_new; ++bucket) { |
| 200 | bucket_list_new[bucket].space = bucket_size_new; | 223 | bucket_list_new[bucket].space = bucket_size_new; |
| 201 | bucket_list_new[bucket].data = malloc( bucket_size_new * sizeof(ot_peer) ); | 224 | bucket_list_new[bucket].data = malloc(bucket_size_new * peer_size); |
| 202 | if( !bucket_list_new[bucket].data ) | 225 | if (!bucket_list_new[bucket].data) |
| 203 | return vector_clean_list( bucket_list_new, num_buckets_new ); | 226 | return vector_clean_list(bucket_list_new, num_buckets_new); |
| 204 | } | 227 | } |
| 205 | 228 | ||
| 206 | /* Now sort them into the correct bucket */ | 229 | /* Now sort them into the correct bucket */ |
| 207 | for( bucket=0; bucket<num_buckets_old; ++bucket ) { | 230 | for (bucket = 0; bucket < num_buckets_old; ++bucket) { |
| 208 | ot_peer * peers_old = bucket_list_old[bucket].data, * peers_new; | 231 | ot_peer *peers_old = bucket_list_old[bucket].data; |
| 209 | int peer_count_old = bucket_list_old[bucket].size; | 232 | int peer_count_old = bucket_list_old[bucket].size; |
| 210 | while( peer_count_old-- ) { | 233 | while (peer_count_old--) { |
| 211 | ot_vector * bucket_dest = bucket_list_new; | 234 | ot_vector *bucket_dest = bucket_list_new; |
| 212 | if( num_buckets_new > 1 ) | 235 | if (num_buckets_new > 1) |
| 213 | bucket_dest += vector_hash_peer(peers_old, num_buckets_new); | 236 | bucket_dest += vector_hash_peer(peers_old, OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size), num_buckets_new); |
| 214 | if( bucket_dest->size + 1 > bucket_dest->space ) { | 237 | if (bucket_dest->size + 1 > bucket_dest->space) { |
| 215 | void * tmp = realloc( bucket_dest->data, sizeof(ot_peer) * OT_VECTOR_GROW_RATIO * bucket_dest->space ); | 238 | void *tmp = realloc(bucket_dest->data, peer_size * OT_VECTOR_GROW_RATIO * bucket_dest->space); |
| 216 | if( !tmp ) return vector_clean_list( bucket_list_new, num_buckets_new ); | 239 | if (!tmp) |
| 240 | return vector_clean_list(bucket_list_new, num_buckets_new); | ||
| 217 | bucket_dest->data = tmp; | 241 | bucket_dest->data = tmp; |
| 218 | bucket_dest->space *= OT_VECTOR_GROW_RATIO; | 242 | bucket_dest->space *= OT_VECTOR_GROW_RATIO; |
| 219 | } | 243 | } |
| 220 | peers_new = (ot_peer*)bucket_dest->data; | 244 | memcpy((ot_peer *)bucket_dest->data + peer_size * bucket_dest->size++, peers_old, peer_size); |
| 221 | memcpy(peers_new + bucket_dest->size++, peers_old++, sizeof(ot_peer)); | 245 | peers_old += peer_size; |
| 222 | } | 246 | } |
| 223 | } | 247 | } |
| 224 | 248 | ||
| 225 | /* Now sort each bucket to later allow bsearch */ | 249 | /* Now sort each bucket to later allow bsearch */ |
| 226 | for( bucket=0; bucket<num_buckets_new; ++bucket ) | 250 | for (bucket = 0; bucket < num_buckets_new; ++bucket) |
| 227 | qsort( bucket_list_new[bucket].data, bucket_list_new[bucket].size, sizeof( ot_peer ), vector_compare_peer ); | 251 | qsort(bucket_list_new[bucket].data, bucket_list_new[bucket].size, peer_size, sort_func); |
| 228 | 252 | ||
| 229 | /* Everything worked fine. Now link new bucket_list to peer_list */ | 253 | /* Everything worked fine. Now link new bucket_list to peer_list */ |
| 230 | if( OT_PEERLIST_HASBUCKETS( peer_list) ) | 254 | if (OT_PEERLIST_HASBUCKETS(peer_list)) |
| 231 | vector_clean_list( (ot_vector*)peer_list->peers.data, peer_list->peers.size ); | 255 | vector_clean_list((ot_vector *)peer_list->peers.data, peer_list->peers.size); |
| 232 | else | 256 | else |
| 233 | free( peer_list->peers.data ); | 257 | free(peer_list->peers.data); |
| 234 | 258 | ||
| 235 | if( num_buckets_new > 1 ) { | 259 | if (num_buckets_new > 1) { |
| 236 | peer_list->peers.data = bucket_list_new; | 260 | peer_list->peers.data = bucket_list_new; |
| 237 | peer_list->peers.size = num_buckets_new; | 261 | peer_list->peers.size = num_buckets_new; |
| 238 | peer_list->peers.space = 0; /* Magic marker for "is list of buckets" */ | 262 | peer_list->peers.space = 0; /* Magic marker for "is list of buckets" */ |
| @@ -240,27 +264,24 @@ void vector_redistribute_buckets( ot_peerlist * peer_list ) { | |||
| 240 | peer_list->peers.data = bucket_list_new->data; | 264 | peer_list->peers.data = bucket_list_new->data; |
| 241 | peer_list->peers.size = bucket_list_new->size; | 265 | peer_list->peers.size = bucket_list_new->size; |
| 242 | peer_list->peers.space = bucket_list_new->space; | 266 | peer_list->peers.space = bucket_list_new->space; |
| 243 | free( bucket_list_new ); | 267 | free(bucket_list_new); |
| 244 | } | 268 | } |
| 245 | } | 269 | } |
| 246 | 270 | ||
| 247 | void vector_fixup_peers( ot_vector * vector ) { | 271 | void vector_fixup_peers(ot_vector *vector, size_t peer_size) { |
| 248 | int need_fix = 0; | 272 | int need_fix = 0; |
| 249 | 273 | ||
| 250 | if( !vector->size ) { | 274 | if (!vector->size) { |
| 251 | free( vector->data ); | 275 | free(vector->data); |
| 252 | vector->data = NULL; | 276 | vector->data = NULL; |
| 253 | vector->space = 0; | 277 | vector->space = 0; |
| 254 | return; | 278 | return; |
| 255 | } | 279 | } |
| 256 | 280 | ||
| 257 | while( ( vector->size * OT_VECTOR_SHRINK_THRESH < vector->space ) && | 281 | while ((vector->size * OT_VECTOR_SHRINK_THRESH < vector->space) && (vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS)) { |
| 258 | ( vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS ) ) { | ||
| 259 | vector->space /= OT_VECTOR_SHRINK_RATIO; | 282 | vector->space /= OT_VECTOR_SHRINK_RATIO; |
| 260 | need_fix++; | 283 | need_fix++; |
| 261 | } | 284 | } |
| 262 | if( need_fix ) | 285 | if (need_fix) |
| 263 | vector->data = realloc( vector->data, vector->space * sizeof( ot_peer ) ); | 286 | vector->data = realloc(vector->data, vector->space * peer_size); |
| 264 | } | 287 | } |
| 265 | |||
| 266 | const char *g_version_vector_c = "$Source$: $Revision$\n"; | ||
diff --git a/ot_vector.h b/ot_vector.h index f7f87aa..8d41452 100644 --- a/ot_vector.h +++ b/ot_vector.h | |||
| @@ -16,19 +16,21 @@ | |||
| 16 | #define OT_PEER_BUCKET_MAXCOUNT 256 | 16 | #define OT_PEER_BUCKET_MAXCOUNT 256 |
| 17 | 17 | ||
| 18 | typedef struct { | 18 | typedef struct { |
| 19 | void *data; | 19 | void *data; |
| 20 | size_t size; | 20 | size_t size; |
| 21 | size_t space; | 21 | size_t space; |
| 22 | } ot_vector; | 22 | } ot_vector; |
| 23 | 23 | ||
| 24 | void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size, | 24 | void *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch); |
| 25 | size_t compare_size, int *exactmatch ); | 25 | void *vector_find_or_insert(ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch); |
| 26 | void *vector_find_or_insert( ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch ); | 26 | ot_peer *vector_find_or_insert_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch); |
| 27 | ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer *peer, int *exactmatch ); | ||
| 28 | 27 | ||
| 29 | int vector_remove_peer( ot_vector *vector, ot_peer *peer ); | 28 | int vector_remove_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size); |
| 30 | void vector_remove_torrent( ot_vector *vector, ot_torrent *match ); | 29 | void vector_remove_torrent(ot_vector *vector, ot_torrent *match); |
| 31 | void vector_redistribute_buckets( ot_peerlist * peer_list ); | 30 | |
| 32 | void vector_fixup_peers( ot_vector * vector ); | 31 | /* For ot_clean.c */ |
| 32 | void vector_redistribute_buckets(ot_peerlist *peer_list, size_t peer_size); | ||
| 33 | void vector_fixup_peers(ot_vector *vector, size_t peer_size); | ||
| 34 | void vector_clean_list(ot_vector *vector, int num_buckets); | ||
| 33 | 35 | ||
| 34 | #endif | 36 | #endif |
| @@ -4,33 +4,33 @@ | |||
| 4 | $Id$ */ | 4 | $Id$ */ |
| 5 | 5 | ||
| 6 | /* System */ | 6 | /* System */ |
| 7 | #include <arpa/inet.h> | ||
| 8 | #include <ctype.h> | ||
| 9 | #include <errno.h> | ||
| 10 | #include <pthread.h> | ||
| 11 | #include <pwd.h> | ||
| 12 | #include <signal.h> | ||
| 7 | #include <stdint.h> | 13 | #include <stdint.h> |
| 14 | #include <stdio.h> | ||
| 8 | #include <stdlib.h> | 15 | #include <stdlib.h> |
| 9 | #include <string.h> | 16 | #include <string.h> |
| 10 | #include <arpa/inet.h> | ||
| 11 | #include <sys/socket.h> | 17 | #include <sys/socket.h> |
| 12 | #include <unistd.h> | 18 | #include <unistd.h> |
| 13 | #include <errno.h> | ||
| 14 | #include <signal.h> | ||
| 15 | #include <stdio.h> | ||
| 16 | #include <pwd.h> | ||
| 17 | #include <ctype.h> | ||
| 18 | #include <pthread.h> | ||
| 19 | 19 | ||
| 20 | /* Libowfat */ | 20 | /* Libowfat */ |
| 21 | #include "socket.h" | 21 | #include "byte.h" |
| 22 | #include "io.h" | 22 | #include "io.h" |
| 23 | #include "iob.h" | 23 | #include "iob.h" |
| 24 | #include "byte.h" | ||
| 25 | #include "scan.h" | ||
| 26 | #include "ip6.h" | 24 | #include "ip6.h" |
| 27 | #include "ndelay.h" | 25 | #include "ndelay.h" |
| 26 | #include "scan.h" | ||
| 27 | #include "socket.h" | ||
| 28 | 28 | ||
| 29 | /* Opentracker */ | 29 | /* Opentracker */ |
| 30 | #include "trackerlogic.h" | ||
| 31 | #include "ot_vector.h" | ||
| 32 | #include "ot_mutex.h" | 30 | #include "ot_mutex.h" |
| 33 | #include "ot_stats.h" | 31 | #include "ot_stats.h" |
| 32 | #include "ot_vector.h" | ||
| 33 | #include "trackerlogic.h" | ||
| 34 | 34 | ||
| 35 | #ifndef WANT_SYNC_LIVE | 35 | #ifndef WANT_SYNC_LIVE |
| 36 | #define WANT_SYNC_LIVE | 36 | #define WANT_SYNC_LIVE |
| @@ -40,28 +40,28 @@ | |||
| 40 | ot_ip6 g_serverip; | 40 | ot_ip6 g_serverip; |
| 41 | uint16_t g_serverport = 9009; | 41 | uint16_t g_serverport = 9009; |
| 42 | uint32_t g_tracker_id; | 42 | uint32_t g_tracker_id; |
| 43 | char groupip_1[4] = { 224,0,23,5 }; | 43 | char groupip_1[4] = {224, 0, 23, 5}; |
| 44 | int g_self_pipe[2]; | 44 | int g_self_pipe[2]; |
| 45 | 45 | ||
| 46 | /* If you have more than 10 peers, don't use this proxy | 46 | /* If you have more than 10 peers, don't use this proxy |
| 47 | Use 20 slots for 10 peers to have room for 10 incoming connection slots | 47 | Use 20 slots for 10 peers to have room for 10 incoming connection slots |
| 48 | */ | 48 | */ |
| 49 | #define MAX_PEERS 20 | 49 | #define MAX_PEERS 20 |
| 50 | 50 | ||
| 51 | #define LIVESYNC_INCOMING_BUFFSIZE (256*256) | 51 | #define LIVESYNC_INCOMING_BUFFSIZE (256 * 256) |
| 52 | #define STREAMSYNC_OUTGOING_BUFFSIZE (256*256) | 52 | #define STREAMSYNC_OUTGOING_BUFFSIZE (256 * 256) |
| 53 | 53 | ||
| 54 | #define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480 | 54 | #define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480 |
| 55 | #define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer)+sizeof(ot_hash)) | 55 | #define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer) + sizeof(ot_hash)) |
| 56 | #define LIVESYNC_MAXDELAY 15 /* seconds */ | 56 | #define LIVESYNC_MAXDELAY 15 /* seconds */ |
| 57 | 57 | ||
| 58 | /* The amount of time a complete sync cycle should take */ | 58 | /* The amount of time a complete sync cycle should take */ |
| 59 | #define OT_SYNC_INTERVAL_MINUTES 2 | 59 | #define OT_SYNC_INTERVAL_MINUTES 2 |
| 60 | 60 | ||
| 61 | /* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */ | 61 | /* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */ |
| 62 | #define OT_SYNC_SLEEP ( ( ( OT_SYNC_INTERVAL_MINUTES ) * 60 * 1000000 ) / ( OT_BUCKET_COUNT ) ) | 62 | #define OT_SYNC_SLEEP (((OT_SYNC_INTERVAL_MINUTES) * 60 * 1000000) / (OT_BUCKET_COUNT)) |
| 63 | 63 | ||
| 64 | enum { OT_SYNC_PEER }; | 64 | enum { OT_SYNC_PEER4, OT_SYNC_PEER6 }; |
| 65 | enum { FLAG_SERVERSOCKET = 1 }; | 65 | enum { FLAG_SERVERSOCKET = 1 }; |
| 66 | 66 | ||
| 67 | /* For incoming packets */ | 67 | /* For incoming packets */ |
| @@ -75,145 +75,153 @@ static uint8_t *g_peerbuffer_pos; | |||
| 75 | static uint8_t *g_peerbuffer_highwater = g_peerbuffer_start + LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS; | 75 | static uint8_t *g_peerbuffer_highwater = g_peerbuffer_start + LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS; |
| 76 | static ot_time g_next_packet_time; | 76 | static ot_time g_next_packet_time; |
| 77 | 77 | ||
| 78 | static void * livesync_worker( void * args ); | 78 | static void *livesync_worker(void *args); |
| 79 | static void * streamsync_worker( void * args ); | 79 | static void *streamsync_worker(void *args); |
| 80 | static void livesync_proxytell( uint8_t prefix, uint8_t *info_hash, uint8_t *peer ); | 80 | static void livesync_proxytell(uint8_t prefix, uint8_t *info_hash, uint8_t *peer); |
| 81 | 81 | ||
| 82 | void exerr( char * message ) { | 82 | void exerr(char *message) { |
| 83 | fprintf( stderr, "%s\n", message ); | 83 | fprintf(stderr, "%s\n", message); |
| 84 | exit( 111 ); | 84 | exit(111); |
| 85 | } | 85 | } |
| 86 | 86 | ||
| 87 | void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data ) { | 87 | void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data) { |
| 88 | (void) event; | 88 | (void)event; |
| 89 | (void) proto; | 89 | (void)proto; |
| 90 | (void) event_data; | 90 | (void)event_data; |
| 91 | } | 91 | } |
| 92 | 92 | ||
| 93 | void livesync_bind_mcast( ot_ip6 ip, uint16_t port) { | 93 | void livesync_bind_mcast(ot_ip6 ip, uint16_t port) { |
| 94 | char tmpip[4] = {0,0,0,0}; | 94 | char tmpip[4] = {0, 0, 0, 0}; |
| 95 | char *v4ip; | 95 | char *v4ip; |
| 96 | 96 | ||
| 97 | if( !ip6_isv4mapped(ip)) | 97 | if (!ip6_isv4mapped(ip)) |
| 98 | exerr("v6 mcast support not yet available."); | 98 | exerr("v6 mcast support not yet available."); |
| 99 | v4ip = ip+12; | 99 | v4ip = ip + 12; |
| 100 | 100 | ||
| 101 | if( g_socket_in != -1 ) | 101 | if (g_socket_in != -1) |
| 102 | exerr("Error: Livesync listen ip specified twice."); | 102 | exerr("Error: Livesync listen ip specified twice."); |
| 103 | 103 | ||
| 104 | if( ( g_socket_in = socket_udp4( )) < 0) | 104 | if ((g_socket_in = socket_udp4()) < 0) |
| 105 | exerr("Error: Cant create live sync incoming socket." ); | 105 | exerr("Error: Cant create live sync incoming socket."); |
| 106 | ndelay_off(g_socket_in); | 106 | ndelay_off(g_socket_in); |
| 107 | 107 | ||
| 108 | if( socket_bind4_reuse( g_socket_in, tmpip, port ) == -1 ) | 108 | if (socket_bind4_reuse(g_socket_in, tmpip, port) == -1) |
| 109 | exerr("Error: Cant bind live sync incoming socket." ); | 109 | exerr("Error: Cant bind live sync incoming socket."); |
| 110 | 110 | ||
| 111 | if( socket_mcjoin4( g_socket_in, groupip_1, v4ip ) ) | 111 | if (socket_mcjoin4(g_socket_in, groupip_1, v4ip)) |
| 112 | exerr("Error: Cant make live sync incoming socket join mcast group."); | 112 | exerr("Error: Cant make live sync incoming socket join mcast group."); |
| 113 | 113 | ||
| 114 | if( ( g_socket_out = socket_udp4()) < 0) | 114 | if ((g_socket_out = socket_udp4()) < 0) |
| 115 | exerr("Error: Cant create live sync outgoing socket." ); | 115 | exerr("Error: Cant create live sync outgoing socket."); |
| 116 | if( socket_bind4_reuse( g_socket_out, v4ip, port ) == -1 ) | 116 | if (socket_bind4_reuse(g_socket_out, v4ip, port) == -1) |
| 117 | exerr("Error: Cant bind live sync outgoing socket." ); | 117 | exerr("Error: Cant bind live sync outgoing socket."); |
| 118 | 118 | ||
| 119 | socket_mcttl4(g_socket_out, 1); | 119 | socket_mcttl4(g_socket_out, 1); |
| 120 | socket_mcloop4(g_socket_out, 1); | 120 | socket_mcloop4(g_socket_out, 1); |
| 121 | } | 121 | } |
| 122 | 122 | ||
| 123 | size_t add_peer_to_torrent_proxy( ot_hash hash, ot_peer *peer ) { | 123 | size_t add_peer_to_torrent_proxy(ot_hash hash, ot_peer *peer, size_t peer_size) { |
| 124 | int exactmatch; | 124 | int exactmatch; |
| 125 | ot_torrent *torrent; | 125 | ot_torrent *torrent; |
| 126 | ot_peer *peer_dest; | 126 | ot_peerlist *peer_list; |
| 127 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); | 127 | ot_peer *peer_dest; |
| 128 | | 128 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash); |
| 129 | torrent = vector_find_or_insert( torrents_list, (void*)hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 129 | size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); |
| 130 | if( !torrent ) | 130 | |
| 131 | torrent = vector_find_or_insert(torrents_list, (void *)hash, sizeof(ot_torrent), compare_size, &exactmatch); | ||
| 132 | if (!torrent) | ||
| 131 | return -1; | 133 | return -1; |
| 132 | 134 | ||
| 133 | if( !exactmatch ) { | 135 | if (!exactmatch) { |
| 134 | /* Create a new torrent entry, then */ | 136 | /* Create a new torrent entry, then */ |
| 135 | memcpy( torrent->hash, hash, sizeof(ot_hash) ); | 137 | memcpy(torrent->hash, hash, sizeof(ot_hash)); |
| 136 | 138 | ||
| 137 | if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) { | 139 | if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) { |
| 138 | vector_remove_torrent( torrents_list, torrent ); | 140 | vector_remove_torrent(torrents_list, torrent); |
| 139 | mutex_bucket_unlock_by_hash( hash, 0 ); | 141 | mutex_bucket_unlock_by_hash(hash, 0); |
| 140 | return -1; | 142 | return -1; |
| 141 | } | 143 | } |
| 142 | 144 | ||
| 143 | byte_zero( torrent->peer_list, sizeof( ot_peerlist ) ); | 145 | byte_zero(torrent->peer_list6, sizeof(ot_peerlist)); |
| 146 | byte_zero(torrent->peer_list4, sizeof(ot_peerlist)); | ||
| 144 | } | 147 | } |
| 145 | 148 | ||
| 149 | peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; | ||
| 150 | |||
| 146 | /* Check for peer in torrent */ | 151 | /* Check for peer in torrent */ |
| 147 | peer_dest = vector_find_or_insert_peer( &(torrent->peer_list->peers), peer, &exactmatch ); | 152 | peer_dest = vector_find_or_insert_peer(&(peer_list->peers), peer, peer_size, &exactmatch); |
| 148 | if( !peer_dest ) { | 153 | if (!peer_dest) { |
| 149 | mutex_bucket_unlock_by_hash( hash, 0 ); | 154 | mutex_bucket_unlock_by_hash(hash, 0); |
| 150 | return -1; | 155 | return -1; |
| 151 | } | 156 | } |
| 152 | /* Tell peer that it's fresh */ | 157 | /* Tell peer that it's fresh */ |
| 153 | OT_PEERTIME( peer ) = 0; | 158 | OT_PEERTIME(peer, peer_size) = 0; |
| 154 | 159 | ||
| 155 | /* If we hadn't had a match create peer there */ | 160 | /* If we hadn't had a match create peer there */ |
| 156 | if( !exactmatch ) { | 161 | if (!exactmatch) { |
| 157 | torrent->peer_list->peer_count++; | 162 | peer_list->peer_count++; |
| 158 | if( OT_PEERFLAG(peer) & PEER_FLAG_SEEDING ) | 163 | if (OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_SEEDING) |
| 159 | torrent->peer_list->seed_count++; | 164 | peer_list->seed_count++; |
| 160 | } | 165 | } |
| 161 | memcpy( peer_dest, peer, sizeof(ot_peer) ); | 166 | memcpy(peer_dest, peer, peer_size); |
| 162 | mutex_bucket_unlock_by_hash( hash, 0 ); | 167 | mutex_bucket_unlock_by_hash(hash, 0); |
| 163 | return 0; | 168 | return 0; |
| 164 | } | 169 | } |
| 165 | 170 | ||
| 166 | size_t remove_peer_from_torrent_proxy( ot_hash hash, ot_peer *peer ) { | 171 | size_t remove_peer_from_torrent_proxy(ot_hash hash, ot_peer *peer, size_t peer_size) { |
| 167 | int exactmatch; | 172 | int exactmatch; |
| 168 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); | 173 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash); |
| 169 | ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 174 | ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch); |
| 170 | 175 | ||
| 171 | if( exactmatch ) { | 176 | if (exactmatch) { |
| 172 | ot_peerlist *peer_list = torrent->peer_list; | 177 | ot_peerlist *peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; |
| 173 | switch( vector_remove_peer( &peer_list->peers, peer ) ) { | 178 | switch (vector_remove_peer(&peer_list->peers, peer, peer_size)) { |
| 174 | case 2: peer_list->seed_count--; /* Intentional fallthrough */ | 179 | case 2: |
| 175 | case 1: peer_list->peer_count--; /* Intentional fallthrough */ | 180 | peer_list->seed_count--; /* Intentional fallthrough */ |
| 176 | default: break; | 181 | case 1: |
| 182 | peer_list->peer_count--; /* Intentional fallthrough */ | ||
| 183 | default: | ||
| 184 | break; | ||
| 177 | } | 185 | } |
| 178 | } | 186 | } |
| 179 | 187 | ||
| 180 | mutex_bucket_unlock_by_hash( hash, 0 ); | 188 | mutex_bucket_unlock_by_hash(hash, 0); |
| 181 | return 0; | 189 | return 0; |
| 182 | } | 190 | } |
| 183 | 191 | ||
| 184 | void free_peerlist( ot_peerlist *peer_list ) { | 192 | void free_peerlist(ot_peerlist *peer_list) { |
| 185 | if( peer_list->peers.data ) { | 193 | if (peer_list->peers.data) { |
| 186 | if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { | 194 | if (OT_PEERLIST_HASBUCKETS(peer_list)) { |
| 187 | ot_vector *bucket_list = (ot_vector*)(peer_list->peers.data); | 195 | ot_vector *bucket_list = (ot_vector *)(peer_list->peers.data); |
| 188 | 196 | ||
| 189 | while( peer_list->peers.size-- ) | 197 | while (peer_list->peers.size--) |
| 190 | free( bucket_list++->data ); | 198 | free(bucket_list++->data); |
| 191 | } | 199 | } |
| 192 | free( peer_list->peers.data ); | 200 | free(peer_list->peers.data); |
| 193 | } | 201 | } |
| 194 | free( peer_list ); | 202 | free(peer_list); |
| 195 | } | 203 | } |
| 196 | 204 | ||
| 197 | static void livesync_handle_peersync( ssize_t datalen ) { | 205 | static void livesync_handle_peersync(ssize_t datalen, size_t peer_size) { |
| 198 | int off = sizeof( g_tracker_id ) + sizeof( uint32_t ); | 206 | int off = sizeof(g_tracker_id) + sizeof(uint32_t); |
| 199 | 207 | ||
| 200 | fprintf( stderr, "." ); | 208 | fprintf(stderr, "."); |
| 201 | 209 | ||
| 202 | while( off + (ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ) <= datalen ) { | 210 | while ((ssize_t)(off + sizeof(ot_hash) + peer_size) <= datalen) { |
| 203 | ot_peer *peer = (ot_peer*)(g_inbuffer + off + sizeof(ot_hash)); | 211 | ot_peer *peer = (ot_peer *)(g_inbuffer + off + sizeof(ot_hash)); |
| 204 | ot_hash *hash = (ot_hash*)(g_inbuffer + off); | 212 | ot_hash *hash = (ot_hash *)(g_inbuffer + off); |
| 205 | 213 | ||
| 206 | if( OT_PEERFLAG(peer) & PEER_FLAG_STOPPED ) | 214 | if (OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_STOPPED) |
| 207 | remove_peer_from_torrent_proxy( *hash, peer ); | 215 | remove_peer_from_torrent_proxy(*hash, peer, peer_size); |
| 208 | else | 216 | else |
| 209 | add_peer_to_torrent_proxy( *hash, peer ); | 217 | add_peer_to_torrent_proxy(*hash, peer, peer_size); |
| 210 | 218 | ||
| 211 | off += sizeof( ot_hash ) + sizeof( ot_peer ); | 219 | off += sizeof(ot_hash) + peer_size; |
| 212 | } | 220 | } |
| 213 | } | 221 | } |
| 214 | 222 | ||
| 215 | int usage( char *self ) { | 223 | int usage(char *self) { |
| 216 | fprintf( stderr, "Usage: %s -L <livesync_iface_ip> -l <listenip>:<listenport> -c <connectip>:<connectport>\n", self ); | 224 | fprintf(stderr, "Usage: %s -L <livesync_iface_ip> -l <listenip>:<listenport> -c <connectip>:<connectport>\n", self); |
| 217 | return 0; | 225 | return 0; |
| 218 | } | 226 | } |
| 219 | 227 | ||
| @@ -228,115 +236,115 @@ enum { | |||
| 228 | FLAG_MASK = 0x07 | 236 | FLAG_MASK = 0x07 |
| 229 | }; | 237 | }; |
| 230 | 238 | ||
| 231 | #define PROXYPEER_NEEDSCONNECT(flag) ((flag)==FLAG_OUTGOING) | 239 | #define PROXYPEER_NEEDSCONNECT(flag) ((flag) == FLAG_OUTGOING) |
| 232 | #define PROXYPEER_ISCONNECTED(flag) (((flag)&FLAG_MASK)==FLAG_CONNECTED) | 240 | #define PROXYPEER_ISCONNECTED(flag) (((flag) & FLAG_MASK) == FLAG_CONNECTED) |
| 233 | #define PROXYPEER_SETDISCONNECTED(flag) (flag)=(((flag)&FLAG_OUTGOING)|FLAG_DISCONNECTED) | 241 | #define PROXYPEER_SETDISCONNECTED(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_DISCONNECTED) |
| 234 | #define PROXYPEER_SETCONNECTING(flag) (flag)=(((flag)&FLAG_OUTGOING)|FLAG_CONNECTING) | 242 | #define PROXYPEER_SETCONNECTING(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_CONNECTING) |
| 235 | #define PROXYPEER_SETWAITTRACKERID(flag) (flag)=(((flag)&FLAG_OUTGOING)|FLAG_WAITTRACKERID) | 243 | #define PROXYPEER_SETWAITTRACKERID(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_WAITTRACKERID) |
| 236 | #define PROXYPEER_SETCONNECTED(flag) (flag)=(((flag)&FLAG_OUTGOING)|FLAG_CONNECTED) | 244 | #define PROXYPEER_SETCONNECTED(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_CONNECTED) |
| 237 | 245 | ||
| 238 | typedef struct { | 246 | typedef struct { |
| 239 | int state; /* Whether we want to connect, how far our handshake is, etc. */ | 247 | int state; /* Whether we want to connect, how far our handshake is, etc. */ |
| 240 | ot_ip6 ip; /* The peer to connect to */ | 248 | ot_ip6 ip; /* The peer to connect to */ |
| 241 | uint16_t port; /* The peers port */ | 249 | uint16_t port; /* The peers port */ |
| 242 | uint8_t indata[8192*16]; /* Any data not processed yet */ | 250 | uint8_t indata[8192 * 16]; /* Any data not processed yet */ |
| 243 | size_t indata_length; /* Length of unprocessed data */ | 251 | size_t indata_length; /* Length of unprocessed data */ |
| 244 | uint32_t tracker_id; /* How the other end greeted */ | 252 | uint32_t tracker_id; /* How the other end greeted */ |
| 245 | int64 fd; /* A file handle, if connected, <= 0 is disconnected (0 initially, -1 else) */ | 253 | int64 fd; /* A file handle, if connected, <= 0 is disconnected (0 initially, -1 else) */ |
| 246 | io_batch outdata; /* The iobatch containing our sync data */ | 254 | io_batch outdata; /* The iobatch containing our sync data */ |
| 247 | 255 | ||
| 248 | size_t packet_tcount; /* Number of unprocessed torrents in packet we currently receive */ | 256 | size_t packet_tcount; /* Number of unprocessed torrents in packet we currently receive */ |
| 249 | uint8_t packet_tprefix; /* Prefix byte for all torrents in current packet */ | 257 | uint8_t packet_tprefix; /* Prefix byte for all torrents in current packet */ |
| 250 | uint8_t packet_type; /* Type of current packet */ | 258 | uint8_t packet_type; /* Type of current packet */ |
| 251 | uint32_t packet_tid; /* Tracker id for current packet */ | 259 | uint32_t packet_tid; /* Tracker id for current packet */ |
| 252 | 260 | ||
| 253 | } proxy_peer; | 261 | } proxy_peer; |
| 254 | static void process_indata( proxy_peer * peer ); | 262 | static void process_indata(proxy_peer *peer); |
| 255 | 263 | ||
| 256 | void reset_info_block( proxy_peer * peer ) { | 264 | void reset_info_block(proxy_peer *peer) { |
| 257 | peer->indata_length = 0; | 265 | peer->indata_length = 0; |
| 258 | peer->tracker_id = 0; | 266 | peer->tracker_id = 0; |
| 259 | peer->fd = -1; | 267 | peer->fd = -1; |
| 260 | peer->packet_tcount = 0; | 268 | peer->packet_tcount = 0; |
| 261 | iob_reset( &peer->outdata ); | 269 | iob_reset(&peer->outdata); |
| 262 | PROXYPEER_SETDISCONNECTED( peer->state ); | 270 | PROXYPEER_SETDISCONNECTED(peer->state); |
| 263 | } | 271 | } |
| 264 | 272 | ||
| 265 | /* Number of connections to peers | 273 | /* Number of connections to peers |
| 266 | * If a peer's IP is set, we try to reconnect, when the connection drops | 274 | * If a peer's IP is set, we try to reconnect, when the connection drops |
| 267 | * If we already have a connected tracker_id in our records for an _incoming_ connection, drop it | 275 | * If we already have a connected tracker_id in our records for an _incoming_ connection, drop it |
| 268 | * Multiple connections to/from the same ip are okay, if tracker_id doesn't match | 276 | * Multiple connections to/from the same ip are okay, if tracker_id doesn't match |
| 269 | * Reconnect attempts occur only twice a minute | 277 | * Reconnect attempts occur only twice a minute |
| 270 | */ | 278 | */ |
| 271 | static int g_connection_count; | 279 | static int g_connection_count; |
| 272 | static ot_time g_connection_reconn; | 280 | static ot_time g_connection_reconn; |
| 273 | static proxy_peer g_connections[MAX_PEERS]; | 281 | static proxy_peer g_connections[MAX_PEERS]; |
| 274 | 282 | ||
| 275 | static void handle_reconnects( void ) { | 283 | static void handle_reconnects(void) { |
| 276 | int i; | 284 | int i; |
| 277 | for( i=0; i<g_connection_count; ++i ) | 285 | for (i = 0; i < g_connection_count; ++i) |
| 278 | if( PROXYPEER_NEEDSCONNECT( g_connections[i].state ) ) { | 286 | if (PROXYPEER_NEEDSCONNECT(g_connections[i].state)) { |
| 279 | int64 newfd = socket_tcp6( ); | 287 | int64 newfd = socket_tcp6(); |
| 280 | fprintf( stderr, "(Re)connecting to peer..." ); | 288 | fprintf(stderr, "(Re)connecting to peer..."); |
| 281 | if( newfd < 0 ) continue; /* No socket for you */ | 289 | if (newfd < 0) |
| 290 | continue; /* No socket for you */ | ||
| 282 | io_fd(newfd); | 291 | io_fd(newfd); |
| 283 | if( socket_bind6_reuse(newfd,g_serverip,g_serverport,0) ) { | 292 | if (socket_bind6_reuse(newfd, g_serverip, g_serverport, 0)) { |
| 284 | io_close( newfd ); | 293 | io_close(newfd); |
| 285 | continue; | 294 | continue; |
| 286 | } | 295 | } |
| 287 | if( socket_connect6(newfd,g_connections[i].ip,g_connections[i].port,0) == -1 && | 296 | if (socket_connect6(newfd, g_connections[i].ip, g_connections[i].port, 0) == -1 && errno != EINPROGRESS && errno != EWOULDBLOCK) { |
| 288 | errno != EINPROGRESS && errno != EWOULDBLOCK ) { | ||
| 289 | close(newfd); | 297 | close(newfd); |
| 290 | continue; | 298 | continue; |
| 291 | } | 299 | } |
| 292 | io_wantwrite(newfd); /* So we will be informed when it is connected */ | 300 | io_wantwrite(newfd); /* So we will be informed when it is connected */ |
| 293 | io_setcookie(newfd,g_connections+i); | 301 | io_setcookie(newfd, g_connections + i); |
| 294 | 302 | ||
| 295 | /* Prepare connection info block */ | 303 | /* Prepare connection info block */ |
| 296 | reset_info_block( g_connections+i ); | 304 | reset_info_block(g_connections + i); |
| 297 | g_connections[i].fd = newfd; | 305 | g_connections[i].fd = newfd; |
| 298 | PROXYPEER_SETCONNECTING( g_connections[i].state ); | 306 | PROXYPEER_SETCONNECTING(g_connections[i].state); |
| 299 | } | 307 | } |
| 300 | g_connection_reconn = time(NULL) + 30; | 308 | g_connection_reconn = time(NULL) + 30; |
| 301 | } | 309 | } |
| 302 | 310 | ||
| 303 | /* Handle incoming connection requests, check against whitelist */ | 311 | /* Handle incoming connection requests, check against whitelist */ |
| 304 | static void handle_accept( int64 serversocket ) { | 312 | static void handle_accept(int64 serversocket) { |
| 305 | int64 newfd; | 313 | int64 newfd; |
| 306 | ot_ip6 ip; | 314 | ot_ip6 ip; |
| 307 | uint16 port; | 315 | uint16 port; |
| 308 | 316 | ||
| 309 | while( ( newfd = socket_accept6( serversocket, ip, &port, NULL ) ) != -1 ) { | 317 | while ((newfd = socket_accept6(serversocket, ip, &port, NULL)) != -1) { |
| 310 | 318 | ||
| 311 | /* XXX some access control */ | 319 | /* XXX some access control */ |
| 312 | 320 | ||
| 313 | /* Put fd into a non-blocking mode */ | 321 | /* Put fd into a non-blocking mode */ |
| 314 | io_nonblock( newfd ); | 322 | io_nonblock(newfd); |
| 315 | 323 | ||
| 316 | if( !io_fd( newfd ) ) | 324 | if (!io_fd(newfd)) |
| 317 | io_close( newfd ); | 325 | io_close(newfd); |
| 318 | else { | 326 | else { |
| 319 | /* Find a new home for our incoming connection */ | 327 | /* Find a new home for our incoming connection */ |
| 320 | int i; | 328 | int i; |
| 321 | for( i=0; i<MAX_PEERS; ++i ) | 329 | for (i = 0; i < MAX_PEERS; ++i) |
| 322 | if( g_connections[i].state == FLAG_DISCONNECTED ) | 330 | if (g_connections[i].state == FLAG_DISCONNECTED) |
| 323 | break; | 331 | break; |
| 324 | if( i == MAX_PEERS ) { | 332 | if (i == MAX_PEERS) { |
| 325 | fprintf( stderr, "No room for incoming connection." ); | 333 | fprintf(stderr, "No room for incoming connection."); |
| 326 | close( newfd ); | 334 | close(newfd); |
| 327 | continue; | 335 | continue; |
| 328 | } | 336 | } |
| 329 | 337 | ||
| 330 | /* Prepare connection info block */ | 338 | /* Prepare connection info block */ |
| 331 | reset_info_block( g_connections+i ); | 339 | reset_info_block(g_connections + i); |
| 332 | PROXYPEER_SETCONNECTING( g_connections[i].state ); | 340 | PROXYPEER_SETCONNECTING(g_connections[i].state); |
| 333 | g_connections[i].port = port; | 341 | g_connections[i].port = port; |
| 334 | g_connections[i].fd = newfd; | 342 | g_connections[i].fd = newfd; |
| 335 | 343 | ||
| 336 | io_setcookie( newfd, g_connections + i ); | 344 | io_setcookie(newfd, g_connections + i); |
| 337 | 345 | ||
| 338 | /* We expect the connecting side to begin with its tracker_id */ | 346 | /* We expect the connecting side to begin with its tracker_id */ |
| 339 | io_wantread( newfd ); | 347 | io_wantread(newfd); |
| 340 | } | 348 | } |
| 341 | } | 349 | } |
| 342 | 350 | ||
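The PROXYPEER_* macros earlier in this hunk encode two things in one int: a direction bit that survives every state change, and a small connection-state value selected through FLAG_MASK. A minimal sketch of that behaviour follows; the concrete flag values are assumptions, only FLAG_MASK = 0x07 is taken from the code above.

#include <assert.h>

enum { FLAG_OUTGOING = 0x80 };            /* assumed: any bit outside FLAG_MASK */
enum { FLAG_DISCONNECTED = 0, FLAG_CONNECTING = 1, FLAG_WAITTRACKERID = 2, FLAG_CONNECTED = 3, FLAG_MASK = 0x07 };

#define PROXYPEER_SETCONNECTING(flag) (flag) = (((flag) & FLAG_OUTGOING) | FLAG_CONNECTING)
#define PROXYPEER_SETCONNECTED(flag)  (flag) = (((flag) & FLAG_OUTGOING) | FLAG_CONNECTED)
#define PROXYPEER_ISCONNECTED(flag)   (((flag) & FLAG_MASK) == FLAG_CONNECTED)

int main(void) {
  int state = FLAG_OUTGOING;              /* we dialed out; handle_reconnects() owns this peer */
  PROXYPEER_SETCONNECTING(state);
  assert(!PROXYPEER_ISCONNECTED(state));
  PROXYPEER_SETCONNECTED(state);
  assert(PROXYPEER_ISCONNECTED(state) && (state & FLAG_OUTGOING)); /* direction bit kept */
  return 0;
}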
| @@ -344,117 +352,116 @@ static void handle_accept( int64 serversocket ) { | |||
| 344 | } | 352 | } |
| 345 | 353 | ||
| 346 | /* New sync data on the stream */ | 354 | /* New sync data on the stream */ |
| 347 | static void handle_read( int64 peersocket ) { | 355 | static void handle_read(int64 peersocket) { |
| 348 | int i; | 356 | int i; |
| 349 | int64 datalen; | 357 | int64 datalen; |
| 350 | uint32_t tracker_id; | 358 | uint32_t tracker_id; |
| 351 | proxy_peer *peer = io_getcookie( peersocket ); | 359 | proxy_peer *peer = io_getcookie(peersocket); |
| 352 | 360 | ||
| 353 | if( !peer ) { | 361 | if (!peer) { |
| 354 | /* Can't happen ;) */ | 362 | /* Can't happen ;) */ |
| 355 | io_close( peersocket ); | 363 | io_close(peersocket); |
| 356 | return; | 364 | return; |
| 357 | } | 365 | } |
| 358 | switch( peer->state & FLAG_MASK ) { | 366 | switch (peer->state & FLAG_MASK) { |
| 359 | case FLAG_DISCONNECTED: | 367 | case FLAG_DISCONNECTED: |
| 360 | io_close( peersocket ); | 368 | io_close(peersocket); |
| 361 | break; /* Shouldn't happen */ | 369 | break; /* Shouldn't happen */ |
| 362 | case FLAG_CONNECTING: | 370 | case FLAG_CONNECTING: |
| 363 | case FLAG_WAITTRACKERID: | 371 | case FLAG_WAITTRACKERID: |
| 364 | /* We want at least the first four bytes to come at once, to avoid keeping extra states (for now) | 372 | /* We want at least the first four bytes to come at once, to avoid keeping extra states (for now) |
| 365 | This also catches 0 bytes reads == EOF and negative values, denoting connection errors */ | 373 | This also catches 0 bytes reads == EOF and negative values, denoting connection errors */ |
| 366 | if( io_tryread( peersocket, (void*)&tracker_id, sizeof( tracker_id ) ) != sizeof( tracker_id ) ) | 374 | if (io_tryread(peersocket, (void *)&tracker_id, sizeof(tracker_id)) != sizeof(tracker_id)) |
| 367 | goto close_socket; | 375 | goto close_socket; |
| 368 | 376 | ||
| 369 | /* See, if we already have a connection to that peer */ | 377 | /* See, if we already have a connection to that peer */ |
| 370 | for( i=0; i<MAX_PEERS; ++i ) | 378 | for (i = 0; i < MAX_PEERS; ++i) |
| 371 | if( ( g_connections[i].state & FLAG_MASK ) == FLAG_CONNECTED && | 379 | if ((g_connections[i].state & FLAG_MASK) == FLAG_CONNECTED && g_connections[i].tracker_id == tracker_id) { |
| 372 | g_connections[i].tracker_id == tracker_id ) { | 380 | fprintf(stderr, "Peer already connected. Closing connection.\n"); |
| 373 | fprintf( stderr, "Peer already connected. Closing connection.\n" ); | ||
| 374 | goto close_socket; | 381 | goto close_socket; |
| 375 | } | 382 | } |
| 376 | 383 | ||
| 377 | /* Also no need for soliloquy */ | 384 | /* Also no need for soliloquy */ |
| 378 | if( tracker_id == g_tracker_id ) | 385 | if (tracker_id == g_tracker_id) |
| 379 | goto close_socket; | 386 | goto close_socket; |
| 380 | 387 | ||
| 381 | /* The new connection is good, send our tracker_id on incoming connections */ | 388 | /* The new connection is good, send our tracker_id on incoming connections */ |
| 382 | if( peer->state == FLAG_CONNECTING ) | 389 | if (peer->state == FLAG_CONNECTING) |
| 383 | if( io_trywrite( peersocket, (void*)&g_tracker_id, sizeof( g_tracker_id ) ) != sizeof( g_tracker_id ) ) | 390 | if (io_trywrite(peersocket, (void *)&g_tracker_id, sizeof(g_tracker_id)) != sizeof(g_tracker_id)) |
| 384 | goto close_socket; | 391 | goto close_socket; |
| 385 | 392 | ||
| 386 | peer->tracker_id = tracker_id; | 393 | peer->tracker_id = tracker_id; |
| 387 | PROXYPEER_SETCONNECTED( peer->state ); | 394 | PROXYPEER_SETCONNECTED(peer->state); |
| 388 | 395 | ||
| 389 | if( peer->state & FLAG_OUTGOING ) | 396 | if (peer->state & FLAG_OUTGOING) |
| 390 | fprintf( stderr, "succeeded.\n" ); | 397 | fprintf(stderr, "succeeded.\n"); |
| 391 | else | 398 | else |
| 392 | fprintf( stderr, "Incoming connection successful.\n" ); | 399 | fprintf(stderr, "Incoming connection successful.\n"); |
| 393 | 400 | ||
| 394 | break; | 401 | break; |
| 395 | close_socket: | 402 | close_socket: |
| 396 | fprintf( stderr, "Handshake incomplete, closing socket\n" ); | 403 | fprintf(stderr, "Handshake incomplete, closing socket\n"); |
| 397 | io_close( peersocket ); | 404 | io_close(peersocket); |
| 398 | reset_info_block( peer ); | 405 | reset_info_block(peer); |
| 399 | break; | 406 | break; |
| 400 | case FLAG_CONNECTED: | 407 | case FLAG_CONNECTED: |
| 401 | /* Here we actually expect data from peer | 408 | /* Here we actually expect data from peer |
| 402 | indata_length should be less than 20+256*7 bytes, for incomplete torrent entries */ | 409 | indata_length should be less than 20+256*7 bytes, for incomplete torrent entries */ |
| 403 | datalen = io_tryread( peersocket, (void*)(peer->indata + peer->indata_length), sizeof( peer->indata ) - peer->indata_length ); | 410 | datalen = io_tryread(peersocket, (void *)(peer->indata + peer->indata_length), sizeof(peer->indata) - peer->indata_length); |
| 404 | if( !datalen || datalen < -1 ) { | 411 | if (!datalen || datalen < -1) { |
| 405 | fprintf( stderr, "Connection closed by remote peer.\n" ); | 412 | fprintf(stderr, "Connection closed by remote peer.\n"); |
| 406 | io_close( peersocket ); | 413 | io_close(peersocket); |
| 407 | reset_info_block( peer ); | 414 | reset_info_block(peer); |
| 408 | } else if( datalen > 0 ) { | 415 | } else if (datalen > 0) { |
| 409 | peer->indata_length += datalen; | 416 | peer->indata_length += datalen; |
| 410 | process_indata( peer ); | 417 | process_indata(peer); |
| 411 | } | 418 | } |
| 412 | break; | 419 | break; |
| 413 | } | 420 | } |
| 414 | } | 421 | } |
| 415 | 422 | ||
| 416 | /* Can write new sync data to the stream */ | 423 | /* Can write new sync data to the stream */ |
| 417 | static void handle_write( int64 peersocket ) { | 424 | static void handle_write(int64 peersocket) { |
| 418 | proxy_peer *peer = io_getcookie( peersocket ); | 425 | proxy_peer *peer = io_getcookie(peersocket); |
| 419 | 426 | ||
| 420 | if( !peer ) { | 427 | if (!peer) { |
| 421 | /* Can't happen ;) */ | 428 | /* Can't happen ;) */ |
| 422 | io_close( peersocket ); | 429 | io_close(peersocket); |
| 423 | return; | 430 | return; |
| 424 | } | 431 | } |
| 425 | 432 | ||
| 426 | switch( peer->state & FLAG_MASK ) { | 433 | switch (peer->state & FLAG_MASK) { |
| 427 | case FLAG_DISCONNECTED: | 434 | case FLAG_DISCONNECTED: |
| 428 | default: /* Should not happen */ | 435 | default: /* Should not happen */ |
| 429 | io_close( peersocket ); | 436 | io_close(peersocket); |
| 430 | break; | 437 | break; |
| 431 | case FLAG_CONNECTING: | 438 | case FLAG_CONNECTING: |
| 432 | /* Ensure that the connection is established and handle connection error */ | 439 | /* Ensure that the connection is established and handle connection error */ |
| 433 | if( peer->state & FLAG_OUTGOING && !socket_connected( peersocket ) ) { | 440 | if (peer->state & FLAG_OUTGOING && !socket_connected(peersocket)) { |
| 434 | fprintf( stderr, "failed\n" ); | 441 | fprintf(stderr, "failed\n"); |
| 435 | reset_info_block( peer ); | 442 | reset_info_block(peer); |
| 436 | io_close( peersocket ); | 443 | io_close(peersocket); |
| 437 | break; | 444 | break; |
| 438 | } | 445 | } |
| 439 | 446 | ||
| 440 | if( io_trywrite( peersocket, (void*)&g_tracker_id, sizeof( g_tracker_id ) ) == sizeof( g_tracker_id ) ) { | 447 | if (io_trywrite(peersocket, (void *)&g_tracker_id, sizeof(g_tracker_id)) == sizeof(g_tracker_id)) { |
| 441 | PROXYPEER_SETWAITTRACKERID( peer->state ); | 448 | PROXYPEER_SETWAITTRACKERID(peer->state); |
| 442 | io_dontwantwrite( peersocket ); | 449 | io_dontwantwrite(peersocket); |
| 443 | io_wantread( peersocket ); | 450 | io_wantread(peersocket); |
| 444 | } else { | 451 | } else { |
| 445 | fprintf( stderr, "Handshake incomplete, closing socket\n" ); | 452 | fprintf(stderr, "Handshake incomplete, closing socket\n"); |
| 446 | io_close( peersocket ); | 453 | io_close(peersocket); |
| 447 | reset_info_block( peer ); | 454 | reset_info_block(peer); |
| 448 | } | 455 | } |
| 449 | break; | 456 | break; |
| 450 | case FLAG_CONNECTED: | 457 | case FLAG_CONNECTED: |
| 451 | switch( iob_send( peersocket, &peer->outdata ) ) { | 458 | switch (iob_send(peersocket, &peer->outdata)) { |
| 452 | case 0: /* all data sent */ | 459 | case 0: /* all data sent */ |
| 453 | io_dontwantwrite( peersocket ); | 460 | io_dontwantwrite(peersocket); |
| 454 | break; | 461 | break; |
| 455 | case -3: /* an error occurred */ | 462 | case -3: /* an error occurred */ |
| 456 | io_close( peersocket ); | 463 | io_close(peersocket); |
| 457 | reset_info_block( peer ); | 464 | reset_info_block(peer); |
| 458 | break; | 465 | break; |
| 459 | default: /* Normal operation or eagain */ | 466 | default: /* Normal operation or eagain */ |
| 460 | break; | 467 | break; |
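Taken together, handle_write and handle_read implement a small handshake: the dialing side sends its 4-byte tracker_id once the connect completes, both sides then read the remote id, self-connections and duplicate ids are dropped, and an incoming connection is answered with our own id before it is marked connected. Below is a condensed sketch of that exchange over plain POSIX I/O; the real code uses libowfat's io_trywrite/io_tryread and also scans g_connections for duplicate tracker ids, which is omitted here.

#include <stdint.h>
#include <unistd.h>

static uint32_t g_tracker_id;         /* stands in for the global defined in proxy.c */

/* Returns 1 when the remote id is acceptable, 0 when the socket should be closed. */
static int handshake(int fd, int outgoing, uint32_t *remote_id) {
  /* Outgoing side speaks first, once the connect has completed (FLAG_CONNECTING). */
  if (outgoing && write(fd, &g_tracker_id, sizeof(g_tracker_id)) != (ssize_t)sizeof(g_tracker_id))
    return 0;
  /* Both sides then wait for the remote tracker_id (FLAG_WAITTRACKERID). */
  if (read(fd, remote_id, sizeof(*remote_id)) != (ssize_t)sizeof(*remote_id))
    return 0;
  /* "No need for soliloquy": refuse connections to ourselves. */
  if (*remote_id == g_tracker_id)
    return 0;
  /* Incoming connections are answered with our id before going FLAG_CONNECTED. */
  if (!outgoing && write(fd, &g_tracker_id, sizeof(g_tracker_id)) != (ssize_t)sizeof(g_tracker_id))
    return 0;
  return 1;
}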
| @@ -469,290 +476,324 @@ static void server_mainloop() { | |||
| 469 | int64 sock; | 476 | int64 sock; |
| 470 | 477 | ||
| 471 | /* inlined livesync_init() */ | 478 | /* inlined livesync_init() */ |
| 472 | memset( g_peerbuffer_start, 0, sizeof( g_peerbuffer_start ) ); | 479 | memset(g_peerbuffer_start, 0, sizeof(g_peerbuffer_start)); |
| 473 | g_peerbuffer_pos = g_peerbuffer_start; | 480 | g_peerbuffer_pos = g_peerbuffer_start; |
| 474 | memcpy( g_peerbuffer_pos, &g_tracker_id, sizeof( g_tracker_id ) ); | 481 | memcpy(g_peerbuffer_pos, &g_tracker_id, sizeof(g_tracker_id)); |
| 475 | uint32_pack_big( (char*)g_peerbuffer_pos + sizeof( g_tracker_id ), OT_SYNC_PEER); | 482 | uint32_pack_big((char *)g_peerbuffer_pos + sizeof(g_tracker_id), OT_SYNC_PEER); |
| 476 | g_peerbuffer_pos += sizeof( g_tracker_id ) + sizeof( uint32_t); | 483 | g_peerbuffer_pos += sizeof(g_tracker_id) + sizeof(uint32_t); |
| 477 | g_next_packet_time = time(NULL) + LIVESYNC_MAXDELAY; | 484 | g_next_packet_time = time(NULL) + LIVESYNC_MAXDELAY; |
| 478 | 485 | ||
| 479 | while(1) { | 486 | while (1) { |
| 480 | /* See, if we need to connect to anyone */ | 487 | /* See if we need to connect to anyone */ |
| 481 | if( time(NULL) > g_connection_reconn ) | 488 | if (time(NULL) > g_connection_reconn) |
| 482 | handle_reconnects( ); | 489 | handle_reconnects(); |
| 483 | 490 | ||
| 484 | /* Wait for io events until next approx reconn check time */ | 491 | /* Wait for io events until next approx reconn check time */ |
| 485 | io_waituntil2( 30*1000 ); | 492 | io_waituntil2(30 * 1000); |
| 486 | 493 | ||
| 487 | /* Loop over readable sockets */ | 494 | /* Loop over readable sockets */ |
| 488 | while( ( sock = io_canread( ) ) != -1 ) { | 495 | while ((sock = io_canread()) != -1) { |
| 489 | const void *cookie = io_getcookie( sock ); | 496 | const void *cookie = io_getcookie(sock); |
| 490 | if( (uintptr_t)cookie == FLAG_SERVERSOCKET ) | 497 | if ((uintptr_t)cookie == FLAG_SERVERSOCKET) |
| 491 | handle_accept( sock ); | 498 | handle_accept(sock); |
| 492 | else | 499 | else |
| 493 | handle_read( sock ); | 500 | handle_read(sock); |
| 494 | } | 501 | } |
| 495 | 502 | ||
| 496 | /* Loop over writable sockets */ | 503 | /* Loop over writable sockets */ |
| 497 | while( ( sock = io_canwrite( ) ) != -1 ) | 504 | while ((sock = io_canwrite()) != -1) |
| 498 | handle_write( sock ); | 505 | handle_write(sock); |
| 499 | 506 | ||
| 500 | livesync_ticker( ); | 507 | livesync_ticker(); |
| 501 | } | 508 | } |
| 502 | } | 509 | } |
| 503 | 510 | ||
| 504 | static void panic( const char *routine ) { | 511 | static void panic(const char *routine) { |
| 505 | fprintf( stderr, "%s: %s\n", routine, strerror(errno) ); | 512 | fprintf(stderr, "%s: %s\n", routine, strerror(errno)); |
| 506 | exit( 111 ); | 513 | exit(111); |
| 507 | } | 514 | } |
| 508 | 515 | ||
| 509 | static int64_t ot_try_bind( ot_ip6 ip, uint16_t port ) { | 516 | static int64_t ot_try_bind(ot_ip6 ip, uint16_t port) { |
| 510 | int64 sock = socket_tcp6( ); | 517 | int64 sock = socket_tcp6(); |
| 511 | 518 | ||
| 512 | if( socket_bind6_reuse( sock, ip, port, 0 ) == -1 ) | 519 | if (socket_bind6_reuse(sock, ip, port, 0) == -1) |
| 513 | panic( "socket_bind6_reuse" ); | 520 | panic("socket_bind6_reuse"); |
| 514 | 521 | ||
| 515 | if( socket_listen( sock, SOMAXCONN) == -1 ) | 522 | if (socket_listen(sock, SOMAXCONN) == -1) |
| 516 | panic( "socket_listen" ); | 523 | panic("socket_listen"); |
| 517 | 524 | ||
| 518 | if( !io_fd( sock ) ) | 525 | if (!io_fd(sock)) |
| 519 | panic( "io_fd" ); | 526 | panic("io_fd"); |
| 520 | 527 | ||
| 521 | io_setcookie( sock, (void*)FLAG_SERVERSOCKET ); | 528 | io_setcookie(sock, (void *)FLAG_SERVERSOCKET); |
| 522 | io_wantread( sock ); | 529 | io_wantread(sock); |
| 523 | return sock; | 530 | return sock; |
| 524 | } | 531 | } |
| 525 | 532 | ||
| 526 | | 533 | static int scan_ip6_port(const char *src, ot_ip6 ip, uint16 *port) { |
| 527 | static int scan_ip6_port( const char *src, ot_ip6 ip, uint16 *port ) { | ||
| 528 | const char *s = src; | 534 | const char *s = src; |
| 529 | int off, bracket = 0; | 535 | int off, bracket = 0; |
| 530 | while( isspace(*s) ) ++s; | 536 | while (isspace(*s)) |
| 531 | if( *s == '[' ) ++s, ++bracket; /* for v6 style notation */ | 537 | ++s; |
| 532 | if( !(off = scan_ip6( s, ip ) ) ) | 538 | if (*s == '[') |
| 539 | ++s, ++bracket; /* for v6 style notation */ | ||
| 540 | if (!(off = scan_ip6(s, ip))) | ||
| 533 | return 0; | 541 | return 0; |
| 534 | s += off; | 542 | s += off; |
| 535 | if( *s == 0 || isspace(*s)) return s-src; | 543 | if (*s == 0 || isspace(*s)) |
| 536 | if( *s == ']' && bracket ) ++s; | 544 | return s - src; |
| 537 | if( !ip6_isv4mapped(ip)){ | 545 | if (*s == ']' && bracket) |
| 538 | if( ( bracket && *(s) != ':' ) || ( *(s) != '.' ) ) return 0; | 546 | ++s; |
| 547 | if (!ip6_isv4mapped(ip)) { | ||
| 548 | if ((bracket && *(s) != ':') || (*(s) != '.')) | ||
| 549 | return 0; | ||
| 539 | s++; | 550 | s++; |
| 540 | } else { | 551 | } else { |
| 541 | if( *(s++) != ':' ) return 0; | 552 | if (*(s++) != ':') |
| 553 | return 0; | ||
| 542 | } | 554 | } |
| 543 | if( !(off = scan_ushort (s, port ) ) ) | 555 | if (!(off = scan_ushort(s, port))) |
| 544 | return 0; | 556 | return 0; |
| 545 | return off+s-src; | 557 | return off + s - src; |
| 546 | } | 558 | } |
| 547 | 559 | ||
| 548 | int main( int argc, char **argv ) { | 560 | int main(int argc, char **argv) { |
| 549 | static pthread_t sync_in_thread_id; | 561 | static pthread_t sync_in_thread_id; |
| 550 | static pthread_t sync_out_thread_id; | 562 | static pthread_t sync_out_thread_id; |
| 551 | ot_ip6 serverip; | 563 | ot_ip6 serverip; |
| 552 | uint16_t tmpport; | 564 | uint16_t tmpport; |
| 553 | int scanon = 1, lbound = 0, sbound = 0; | 565 | int scanon = 1, lbound = 0, sbound = 0; |
| 554 | 566 | ||
| 555 | srandom( time(NULL) ); | 567 | srandom(time(NULL)); |
| 556 | #ifdef WANT_ARC4RANDOM | 568 | #ifdef WANT_ARC4RANDOM |
| 557 | g_tracker_id = arc4random(); | 569 | g_tracker_id = arc4random(); |
| 558 | #else | 570 | #else |
| 559 | g_tracker_id = random(); | 571 | g_tracker_id = random(); |
| 560 | #endif | 572 | #endif |
| 561 | noipv6=1; | ||
| 562 | 573 | ||
| 563 | while( scanon ) { | 574 | while (scanon) { |
| 564 | switch( getopt( argc, argv, ":l:c:L:h" ) ) { | 575 | switch (getopt(argc, argv, ":l:c:L:h")) { |
| 565 | case -1: scanon = 0; break; | 576 | case -1: |
| 577 | scanon = 0; | ||
| 578 | break; | ||
| 566 | case 'l': | 579 | case 'l': |
| 567 | tmpport = 0; | 580 | tmpport = 0; |
| 568 | if( !scan_ip6_port( optarg, serverip, &tmpport ) || !tmpport ) { usage( argv[0] ); exit( 1 ); } | 581 | if (!scan_ip6_port(optarg, serverip, &tmpport) || !tmpport) { |
| 569 | ot_try_bind( serverip, tmpport ); | 582 | usage(argv[0]); |
| 583 | exit(1); | ||
| 584 | } | ||
| 585 | ot_try_bind(serverip, tmpport); | ||
| 570 | ++sbound; | 586 | ++sbound; |
| 571 | break; | 587 | break; |
| 572 | case 'c': | 588 | case 'c': |
| 573 | if( g_connection_count > MAX_PEERS / 2 ) exerr( "Connection limit exceeded.\n" ); | 589 | if (g_connection_count > MAX_PEERS / 2) |
| 590 | exerr("Connection limit exceeded.\n"); | ||
| 574 | tmpport = 0; | 591 | tmpport = 0; |
| 575 | if( !scan_ip6_port( optarg, | 592 | if (!scan_ip6_port(optarg, g_connections[g_connection_count].ip, &g_connections[g_connection_count].port) || !g_connections[g_connection_count].port) { |
| 576 | g_connections[g_connection_count].ip, | 593 | usage(argv[0]); |
| 577 | &g_connections[g_connection_count].port ) || | 594 | exit(1); |
| 578 | !g_connections[g_connection_count].port ) { usage( argv[0] ); exit( 1 ); } | 595 | } |
| 579 | g_connections[g_connection_count++].state = FLAG_OUTGOING; | 596 | g_connections[g_connection_count++].state = FLAG_OUTGOING; |
| 580 | break; | 597 | break; |
| 581 | case 'L': | 598 | case 'L': |
| 582 | tmpport = 9696; | 599 | tmpport = 9696; |
| 583 | if( !scan_ip6_port( optarg, serverip, &tmpport ) || !tmpport ) { usage( argv[0] ); exit( 1 ); } | 600 | if (!scan_ip6_port(optarg, serverip, &tmpport) || !tmpport) { |
| 584 | livesync_bind_mcast( serverip, tmpport); ++lbound; break; | 601 | usage(argv[0]); |
| 602 | exit(1); | ||
| 603 | } | ||
| 604 | livesync_bind_mcast(serverip, tmpport); | ||
| 605 | ++lbound; | ||
| 606 | break; | ||
| 585 | default: | 607 | default: |
| 586 | case '?': usage( argv[0] ); exit( 1 ); | 608 | case '?': |
| 609 | usage(argv[0]); | ||
| 610 | exit(1); | ||
| 587 | } | 611 | } |
| 588 | } | 612 | } |
| 589 | 613 | ||
| 590 | if( !lbound ) exerr( "No livesync port bound." ); | 614 | if (!lbound) |
| 591 | if( !g_connection_count && !sbound ) exerr( "No streamsync port bound." ); | 615 | exerr("No livesync port bound."); |
| 592 | pthread_create( &sync_in_thread_id, NULL, livesync_worker, NULL ); | 616 | if (!g_connection_count && !sbound) |
| 593 | pthread_create( &sync_out_thread_id, NULL, streamsync_worker, NULL ); | 617 | exerr("No streamsync port bound."); |
| 618 | pthread_create(&sync_in_thread_id, NULL, livesync_worker, NULL); | ||
| 619 | pthread_create(&sync_out_thread_id, NULL, streamsync_worker, NULL); | ||
| 594 | 620 | ||
| 595 | server_mainloop(); | 621 | server_mainloop(); |
| 596 | return 0; | 622 | return 0; |
| 597 | } | 623 | } |
| 598 | 624 | ||
| 599 | static void * streamsync_worker( void * args ) { | 625 | static void *streamsync_worker(void *args) { |
| 600 | (void)args; | 626 | (void)args; |
| 601 | while( 1 ) { | 627 | while (1) { |
| 602 | int bucket; | 628 | int bucket; |
| 603 | /* For each bucket... */ | 629 | /* For each bucket... */ |
| 604 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 630 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
| 605 | /* Get exclusive access to that bucket */ | 631 | /* Get exclusive access to that bucket */ |
| 606 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 632 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
| 607 | size_t tor_offset, count_def = 0, count_one = 0, count_two = 0, count_peers = 0; | 633 | size_t tor_offset, count_def = 0, count_one = 0, count_two = 0, count_peers = 0; |
| 608 | size_t mem, mem_a = 0, mem_b = 0; | 634 | size_t mem, mem_a = 0, mem_b = 0; |
| 609 | uint8_t *ptr = 0, *ptr_a, *ptr_b, *ptr_c; | 635 | uint8_t *ptr = 0, *ptr_a, *ptr_b, *ptr_c; |
| 610 | 636 | ||
| 611 | if( !torrents_list->size ) goto unlock_continue; | 637 | if (!torrents_list->size) |
| 638 | goto unlock_continue; | ||
| 612 | 639 | ||
| 613 | /* For each torrent in this bucket.. */ | 640 | /* For each torrent in this bucket.. */ |
| 614 | for( tor_offset=0; tor_offset<torrents_list->size; ++tor_offset ) { | 641 | for (tor_offset = 0; tor_offset < torrents_list->size; ++tor_offset) { |
| 615 | /* Address torrents members */ | 642 | /* Address torrents members */ |
| 616 | ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[tor_offset] ).peer_list; | 643 | ot_peerlist *peer_list = (((ot_torrent *)(torrents_list->data))[tor_offset]).peer_list; |
| 617 | switch( peer_list->peer_count ) { | 644 | switch (peer_list->peer_count) { |
| 618 | case 2: count_two++; break; | 645 | case 2: |
| 619 | case 1: count_one++; break; | 646 | count_two++; |
| 620 | case 0: break; | 647 | break; |
| 621 | default: count_def++; | 648 | case 1: |
| 622 | count_peers += peer_list->peer_count; | 649 | count_one++; |
| 650 | break; | ||
| 651 | case 0: | ||
| 652 | break; | ||
| 653 | default: | ||
| 654 | count_def++; | ||
| 655 | count_peers += peer_list->peer_count; | ||
| 623 | } | 656 | } |
| 624 | } | 657 | } |
| 625 | 658 | ||
| 626 | /* Maximal memory requirement: max 3 blocks, max torrents * 20 + max peers * 7 */ | 659 | /* Maximal memory requirement: max 3 blocks, max torrents * 20 + max peers * 7 */ |
| 627 | mem = 3 * ( 1 + 1 + 2 ) + ( count_one + count_two ) * ( 19 + 1 ) + count_def * ( 19 + 8 ) + | 660 | mem = 3 * (1 + 1 + 2) + (count_one + count_two) * (19 + 1) + count_def * (19 + 8) + (count_one + 2 * count_two + count_peers) * 7; |
| 628 | ( count_one + 2 * count_two + count_peers ) * 7; | 661 | |
| 629 | 662 | fprintf(stderr, "Mem: %zd\n", mem); | |
| 630 | fprintf( stderr, "Mem: %zd\n", mem ); | 663 | |
| 631 | 664 | ptr = ptr_a = ptr_b = ptr_c = malloc(mem); | |
| 632 | ptr = ptr_a = ptr_b = ptr_c = malloc( mem ); | 665 | if (!ptr) |
| 633 | if( !ptr ) goto unlock_continue; | 666 | goto unlock_continue; |
| 634 | 667 | ||
| 635 | if( count_one > 4 || !count_def ) { | 668 | if (count_one > 4 || !count_def) { |
| 636 | mem_a = 1 + 1 + 2 + count_one * ( 19 + 7 ); | 669 | mem_a = 1 + 1 + 2 + count_one * (19 + 7); |
| 637 | ptr_b += mem_a; ptr_c += mem_a; | 670 | ptr_b += mem_a; |
| 638 | ptr_a[0] = 1; /* Offset 0: packet type 1 */ | 671 | ptr_c += mem_a; |
| 639 | ptr_a[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */ | 672 | ptr_a[0] = 1; /* Offset 0: packet type 1 */ |
| 640 | ptr_a[2] = count_one >> 8; | 673 | ptr_a[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */ |
| 641 | ptr_a[3] = count_one & 255; | 674 | ptr_a[2] = count_one >> 8; |
| 642 | ptr_a += 4; | 675 | ptr_a[3] = count_one & 255; |
| 676 | ptr_a += 4; | ||
| 643 | } else | 677 | } else |
| 644 | count_def += count_one; | 678 | count_def += count_one; |
| 645 | 679 | ||
| 646 | if( count_two > 4 || !count_def ) { | 680 | if (count_two > 4 || !count_def) { |
| 647 | mem_b = 1 + 1 + 2 + count_two * ( 19 + 14 ); | 681 | mem_b = 1 + 1 + 2 + count_two * (19 + 14); |
| 648 | ptr_c += mem_b; | 682 | ptr_c += mem_b; |
| 649 | ptr_b[0] = 2; /* Offset 0: packet type 2 */ | 683 | ptr_b[0] = 2; /* Offset 0: packet type 2 */ |
| 650 | ptr_b[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */ | 684 | ptr_b[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */ |
| 651 | ptr_b[2] = count_two >> 8; | 685 | ptr_b[2] = count_two >> 8; |
| 652 | ptr_b[3] = count_two & 255; | 686 | ptr_b[3] = count_two & 255; |
| 653 | ptr_b += 4; | 687 | ptr_b += 4; |
| 654 | } else | 688 | } else |
| 655 | count_def += count_two; | 689 | count_def += count_two; |
| 656 | 690 | ||
| 657 | if( count_def ) { | 691 | if (count_def) { |
| 658 | ptr_c[0] = 0; /* Offset 0: packet type 0 */ | 692 | ptr_c[0] = 0; /* Offset 0: packet type 0 */ |
| 659 | ptr_c[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */ | 693 | ptr_c[1] = (bucket << 8) >> OT_BUCKET_COUNT_BITS; /* Offset 1: the shared prefix */ |
| 660 | ptr_c[2] = count_def >> 8; | 694 | ptr_c[2] = count_def >> 8; |
| 661 | ptr_c[3] = count_def & 255; | 695 | ptr_c[3] = count_def & 255; |
| 662 | ptr_c += 4; | 696 | ptr_c += 4; |
| 663 | } | 697 | } |
| 664 | 698 | ||
| 665 | /* For each torrent in this bucket.. */ | 699 | /* For each torrent in this bucket.. */ |
| 666 | for( tor_offset=0; tor_offset<torrents_list->size; ++tor_offset ) { | 700 | for (tor_offset = 0; tor_offset < torrents_list->size; ++tor_offset) { |
| 667 | /* Address torrents members */ | 701 | /* Address torrents members */ |
| 668 | ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + tor_offset; | 702 | ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + tor_offset; |
| 669 | ot_peerlist *peer_list = torrent->peer_list; | 703 | ot_peerlist *peer_list = torrent->peer_list; |
| 670 | ot_peer *peers = (ot_peer*)(peer_list->peers.data); | 704 | ot_peer *peers = (ot_peer *)(peer_list->peers.data); |
| 671 | uint8_t **dst; | 705 | uint8_t **dst; |
| 672 | 706 | ||
| 673 | /* Determine destination slot */ | 707 | /* Determine destination slot */ |
| 674 | count_peers = peer_list->peer_count; | 708 | count_peers = peer_list->peer_count; |
| 675 | switch( count_peers ) { | 709 | switch (count_peers) { |
| 676 | case 0: continue; | 710 | case 0: |
| 677 | case 1: dst = mem_a ? &ptr_a : &ptr_c; break; | 711 | continue; |
| 678 | case 2: dst = mem_b ? &ptr_b : &ptr_c; break; | 712 | case 1: |
| 679 | default: dst = &ptr_c; break; | 713 | dst = mem_a ? &ptr_a : &ptr_c; |
| 714 | break; | ||
| 715 | case 2: | ||
| 716 | dst = mem_b ? &ptr_b : &ptr_c; | ||
| 717 | break; | ||
| 718 | default: | ||
| 719 | dst = &ptr_c; | ||
| 720 | break; | ||
| 680 | } | 721 | } |
| 681 | 722 | ||
| 682 | /* Copy tail of info_hash, advance pointer */ | 723 | /* Copy tail of info_hash, advance pointer */ |
| 683 | memcpy( *dst, ((uint8_t*)torrent->hash) + 1, sizeof( ot_hash ) - 1); | 724 | memcpy(*dst, ((uint8_t *)torrent->hash) + 1, sizeof(ot_hash) - 1); |
| 684 | *dst += sizeof( ot_hash ) - 1; | 725 | *dst += sizeof(ot_hash) - 1; |
| 685 | 726 | ||
| 686 | /* Encode peer count */ | 727 | /* Encode peer count */ |
| 687 | if( dst == &ptr_c ) | 728 | if (dst == &ptr_c) |
| 688 | while( count_peers ) { | 729 | while (count_peers) { |
| 689 | if( count_peers <= 0x7f ) | 730 | if (count_peers <= 0x7f) |
| 690 | *(*dst)++ = count_peers; | 731 | *(*dst)++ = count_peers; |
| 691 | else | 732 | else |
| 692 | *(*dst)++ = 0x80 | ( count_peers & 0x7f ); | 733 | *(*dst)++ = 0x80 | (count_peers & 0x7f); |
| 693 | count_peers >>= 7; | 734 | count_peers >>= 7; |
| 694 | } | 735 | } |
| 695 | 736 | ||
| 696 | /* Copy peers */ | 737 | /* Copy peers */ |
| 697 | count_peers = peer_list->peer_count; | 738 | count_peers = peer_list->peer_count; |
| 698 | while( count_peers-- ) { | 739 | while (count_peers--) { |
| 699 | memcpy( *dst, peers++, OT_IP_SIZE + 3 ); | 740 | memcpy(*dst, peers++, OT_IP_SIZE + 3); |
| 700 | *dst += OT_IP_SIZE + 3; | 741 | *dst += OT_IP_SIZE + 3; |
| 701 | } | 742 | } |
| 702 | free_peerlist(peer_list); | 743 | free_peerlist(peer_list); |
| 703 | } | 744 | } |
| 704 | 745 | ||
| 705 | free( torrents_list->data ); | 746 | free(torrents_list->data); |
| 706 | memset( torrents_list, 0, sizeof(*torrents_list ) ); | 747 | memset(torrents_list, 0, sizeof(*torrents_list)); |
| 707 | unlock_continue: | 748 | unlock_continue: |
| 708 | mutex_bucket_unlock( bucket, 0 ); | 749 | mutex_bucket_unlock(bucket, 0); |
| 709 | 750 | ||
| 710 | if( ptr ) { | 751 | if (ptr) { |
| 711 | int i; | 752 | int i; |
| 712 | 753 | ||
| 713 | if( ptr_b > ptr_c ) ptr_c = ptr_b; | 754 | if (ptr_b > ptr_c) |
| 714 | if( ptr_a > ptr_c ) ptr_c = ptr_a; | 755 | ptr_c = ptr_b; |
| 756 | if (ptr_a > ptr_c) | ||
| 757 | ptr_c = ptr_a; | ||
| 715 | mem = ptr_c - ptr; | 758 | mem = ptr_c - ptr; |
| 716 | 759 | ||
| 717 | for( i=0; i < MAX_PEERS; ++i ) { | 760 | for (i = 0; i < MAX_PEERS; ++i) { |
| 718 | if( PROXYPEER_ISCONNECTED(g_connections[i].state) ) { | 761 | if (PROXYPEER_ISCONNECTED(g_connections[i].state)) { |
| 719 | void *tmp = malloc( mem ); | 762 | void *tmp = malloc(mem); |
| 720 | if( tmp ) { | 763 | if (tmp) { |
| 721 | memcpy( tmp, ptr, mem ); | 764 | memcpy(tmp, ptr, mem); |
| 722 | iob_addbuf_free( &g_connections[i].outdata, tmp, mem ); | 765 | iob_addbuf_free(&g_connections[i].outdata, tmp, mem); |
| 723 | io_wantwrite( g_connections[i].fd ); | 766 | io_wantwrite(g_connections[i].fd); |
| 724 | } | 767 | } |
| 725 | } | 768 | } |
| 726 | } | 769 | } |
| 727 | 770 | ||
| 728 | free( ptr ); | 771 | free(ptr); |
| 729 | } | 772 | } |
| 730 | usleep( OT_SYNC_SLEEP ); | 773 | usleep(OT_SYNC_SLEEP); |
| 731 | } | 774 | } |
| 732 | } | 775 | } |
| 733 | return 0; | 776 | return 0; |
| 734 | } | 777 | } |
| 735 | 778 | ||
| 736 | static void livesync_issue_peersync( ) { | 779 | static void livesync_issue_peersync() { |
| 737 | socket_send4(g_socket_out, (char*)g_peerbuffer_start, g_peerbuffer_pos - g_peerbuffer_start, | 780 | socket_send4(g_socket_out, (char *)g_peerbuffer_start, g_peerbuffer_pos - g_peerbuffer_start, groupip_1, LIVESYNC_PORT); |
| 738 | groupip_1, LIVESYNC_PORT); | 781 | g_peerbuffer_pos = g_peerbuffer_start + sizeof(g_tracker_id) + sizeof(uint32_t); |
| 739 | g_peerbuffer_pos = g_peerbuffer_start + sizeof( g_tracker_id ) + sizeof( uint32_t ); | ||
| 740 | g_next_packet_time = time(NULL) + LIVESYNC_MAXDELAY; | 782 | g_next_packet_time = time(NULL) + LIVESYNC_MAXDELAY; |
| 741 | } | 783 | } |
| 742 | 784 | ||
| 743 | void livesync_ticker( ) { | 785 | void livesync_ticker() { |
| 744 | /* livesync_issue_peersync sets g_next_packet_time */ | 786 | /* livesync_issue_peersync sets g_next_packet_time */ |
| 745 | if( time(NULL) > g_next_packet_time && | 787 | if (time(NULL) > g_next_packet_time && g_peerbuffer_pos > g_peerbuffer_start + sizeof(g_tracker_id)) |
| 746 | g_peerbuffer_pos > g_peerbuffer_start + sizeof( g_tracker_id ) ) | ||
| 747 | livesync_issue_peersync(); | 788 | livesync_issue_peersync(); |
| 748 | } | 789 | } |
| 749 | 790 | ||
| 750 | static void livesync_proxytell( uint8_t prefix, uint8_t *info_hash, uint8_t *peer ) { | 791 | static void livesync_proxytell(uint8_t prefix, uint8_t *info_hash, uint8_t *peer) { |
| 751 | // unsigned int i; | 792 | // unsigned int i; |
| 752 | 793 | ||
| 753 | *g_peerbuffer_pos = prefix; | 794 | *g_peerbuffer_pos = prefix; |
| 754 | memcpy( g_peerbuffer_pos + 1, info_hash, sizeof(ot_hash) - 1 ); | 795 | memcpy(g_peerbuffer_pos + 1, info_hash, sizeof(ot_hash) - 1); |
| 755 | memcpy( g_peerbuffer_pos + sizeof(ot_hash), peer, sizeof(ot_peer) - 1 ); | 796 | memcpy(g_peerbuffer_pos + sizeof(ot_hash), peer, sizeof(ot_peer) - 1); |
| 756 | 797 | ||
| 757 | #if 0 | 798 | #if 0 |
| 758 | /* Dump info_hash */ | 799 | /* Dump info_hash */ |
| @@ -767,77 +808,84 @@ static void livesync_proxytell( uint8_t prefix, uint8_t *info_hash, uint8_t *pee | |||
| 767 | #endif | 808 | #endif |
| 768 | g_peerbuffer_pos += sizeof(ot_peer); | 809 | g_peerbuffer_pos += sizeof(ot_peer); |
| 769 | 810 | ||
| 770 | if( g_peerbuffer_pos >= g_peerbuffer_highwater ) | 811 | if (g_peerbuffer_pos >= g_peerbuffer_highwater) |
| 771 | livesync_issue_peersync(); | 812 | livesync_issue_peersync(); |
| 772 | } | 813 | } |
| 773 | 814 | ||
| 774 | static void process_indata( proxy_peer * peer ) { | 815 | static void process_indata(proxy_peer *peer) { |
| 775 | size_t consumed, peers; | 816 | size_t consumed, peers; |
| 776 | uint8_t *data = peer->indata, *hash; | 817 | uint8_t *data = peer->indata, *hash; |
| 777 | uint8_t *dataend = data + peer->indata_length; | 818 | uint8_t *dataend = data + peer->indata_length; |
| 778 | 819 | ||
| 779 | while( 1 ) { | 820 | while (1) { |
| 780 | /* If we're not inside of a packet, make a new one */ | 821 | /* If we're not inside of a packet, make a new one */ |
| 781 | if( !peer->packet_tcount ) { | 822 | if (!peer->packet_tcount) { |
| 782 | /* Ensure the header is complete or postpone processing */ | 823 | /* Ensure the header is complete or postpone processing */ |
| 783 | if( data + 4 > dataend ) break; | 824 | if (data + 4 > dataend) |
| 784 | peer->packet_type = data[0]; | 825 | break; |
| 785 | peer->packet_tprefix = data[1]; | 826 | peer->packet_type = data[0]; |
| 786 | peer->packet_tcount = data[2] * 256 + data[3]; | 827 | peer->packet_tprefix = data[1]; |
| 787 | data += 4; | 828 | peer->packet_tcount = data[2] * 256 + data[3]; |
| 788 | printf( "type: %hhu, prefix: %02X, torrentcount: %zd\n", peer->packet_type, peer->packet_tprefix, peer->packet_tcount ); | 829 | data += 4; |
| 830 | printf("type: %hhu, prefix: %02X, torrentcount: %zd\n", peer->packet_type, peer->packet_tprefix, peer->packet_tcount); | ||
| 789 | } | 831 | } |
| 790 | 832 | ||
| 791 | /* Ensure size for a minimal torrent block */ | 833 | /* Ensure size for a minimal torrent block */ |
| 792 | if( data + sizeof(ot_hash) + OT_IP_SIZE + 3 > dataend ) break; | 834 | if (data + sizeof(ot_hash) + OT_IP_SIZE + 3 > dataend) |
| 835 | break; | ||
| 793 | 836 | ||
| 794 | /* Advance pointer to peer count or peers */ | 837 | /* Advance pointer to peer count or peers */ |
| 795 | hash = data; | 838 | hash = data; |
| 796 | data += sizeof(ot_hash) - 1; | 839 | data += sizeof(ot_hash) - 1; |
| 797 | 840 | ||
| 798 | /* Type 0 has peer count encoded before each peers */ | 841 | /* Type 0 has peer count encoded before each peers */ |
| 799 | peers = peer->packet_type; | 842 | peers = peer->packet_type; |
| 800 | if( !peers ) { | 843 | if (!peers) { |
| 801 | int shift = 0; | 844 | int shift = 0; |
| 802 | do peers |= ( 0x7f & *data ) << ( 7 * shift ); | 845 | do |
| 803 | while ( *(data++) & 0x80 && shift++ < 6 ); | 846 | peers |= (0x7f & *data) << (7 * shift); |
| 847 | while (*(data++) & 0x80 && shift++ < 6); | ||
| 804 | } | 848 | } |
| 805 | #if 0 | 849 | #if 0 |
| 806 | printf( "peers: %zd\n", peers ); | 850 | printf( "peers: %zd\n", peers ); |
| 807 | #endif | 851 | #endif |
| 808 | /* Ensure enough data being read to hold all peers */ | 852 | /* Ensure enough data being read to hold all peers */ |
| 809 | if( data + (OT_IP_SIZE + 3) * peers > dataend ) { | 853 | if (data + (OT_IP_SIZE + 3) * peers > dataend) { |
| 810 | data = hash; | 854 | data = hash; |
| 811 | break; | 855 | break; |
| 812 | } | 856 | } |
| 813 | while( peers-- ) { | 857 | while (peers--) { |
| 814 | livesync_proxytell( peer->packet_tprefix, hash, data ); | 858 | livesync_proxytell(peer->packet_tprefix, hash, data); |
| 815 | data += OT_IP_SIZE + 3; | 859 | data += OT_IP_SIZE + 3; |
| 816 | } | 860 | } |
| 817 | --peer->packet_tcount; | 861 | --peer->packet_tcount; |
| 818 | } | 862 | } |
| 819 | 863 | ||
| 820 | consumed = data - peer->indata; | 864 | consumed = data - peer->indata; |
| 821 | memmove( peer->indata, data, peer->indata_length - consumed ); | 865 | memmove(peer->indata, data, peer->indata_length - consumed); |
| 822 | peer->indata_length -= consumed; | 866 | peer->indata_length -= consumed; |
| 823 | } | 867 | } |
| 824 | 868 | ||
| 825 | static void * livesync_worker( void * args ) { | 869 | static void *livesync_worker(void *args) { |
| 826 | (void)args; | 870 | (void)args; |
| 827 | while( 1 ) { | 871 | while (1) { |
| 828 | ot_ip6 in_ip; uint16_t in_port; | 872 | ot_ip6 in_ip; |
| 829 | size_t datalen = socket_recv4(g_socket_in, (char*)g_inbuffer, LIVESYNC_INCOMING_BUFFSIZE, 12+(char*)in_ip, &in_port); | 873 | uint16_t in_port; |
| 874 | size_t datalen = socket_recv4(g_socket_in, (char *)g_inbuffer, LIVESYNC_INCOMING_BUFFSIZE, 12 + (char *)in_ip, &in_port); | ||
| 830 | 875 | ||
| 831 | /* Expect at least tracker id and packet type */ | 876 | /* Expect at least tracker id and packet type */ |
| 832 | if( datalen <= (ssize_t)(sizeof( g_tracker_id ) + sizeof( uint32_t )) ) | 877 | if (datalen <= (ssize_t)(sizeof(g_tracker_id) + sizeof(uint32_t))) |
| 833 | continue; | 878 | continue; |
| 834 | if( !memcmp( g_inbuffer, &g_tracker_id, sizeof( g_tracker_id ) ) ) { | 879 | if (!memcmp(g_inbuffer, &g_tracker_id, sizeof(g_tracker_id))) { |
| 835 | /* drop packet coming from ourselves */ | 880 | /* drop packet coming from ourselves */ |
| 836 | continue; | 881 | continue; |
| 837 | } | 882 | } |
| 838 | switch( uint32_read_big( sizeof( g_tracker_id ) + (char*)g_inbuffer ) ) { | 883 | switch (uint32_read_big((char *)g_inbuffer + sizeof(g_tracker_id))) { |
| 839 | case OT_SYNC_PEER: | 884 | case OT_SYNC_PEER4: |
| 840 | livesync_handle_peersync( datalen ); | 885 | livesync_handle_peersync(datalen, OT_PEER_SIZE4); |
| 886 | break; | ||
| 887 | case OT_SYNC_PEER6: | ||
| 888 | livesync_handle_peersync(datalen, OT_PEER_SIZE6); | ||
| 841 | break; | 889 | break; |
| 842 | default: | 890 | default: |
| 843 | // fprintf( stderr, "Received an unknown live sync packet type %u.\n", uint32_read_big( sizeof( g_tracker_id ) + (char*)g_inbuffer ) ); | 891 | // fprintf( stderr, "Received an unknown live sync packet type %u.\n", uint32_read_big( sizeof( g_tracker_id ) + (char*)g_inbuffer ) ); |
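Putting the encoder in streamsync_worker and the decoder in process_indata side by side, the stream packets have a compact layout. The sketch below summarizes it and mirrors the 7-bit length decoding loop; the layout is reconstructed from this hunk and OT_IP_SIZE comes from trackerlogic.h, so treat the exact field widths as an assumption.

/* Reconstructed packet layout (per the hunk above):
 *   byte 0    packet type: 1 = every torrent has one peer, 2 = two peers, 0 = generic
 *   byte 1    info_hash prefix shared by all torrents in this bucket
 *   bytes 2-3 torrent count, big endian
 *   per torrent:
 *     19 bytes        tail of the info_hash (first byte is the shared prefix)
 *     varint          peer count, type 0 packets only (7 bits per byte, 0x80 = more)
 *     OT_IP_SIZE + 3  bytes per peer, repeated peer-count times
 */
#include <stddef.h>
#include <stdint.h>

/* Mirrors the decode loop in process_indata; advances *data past the varint. */
static size_t decode_peer_count(const uint8_t **data) {
  size_t peers = 0;
  int    shift = 0;
  do
    peers |= (size_t)(0x7f & **data) << (7 * shift);
  while (*((*data)++) & 0x80 && shift++ < 6);
  return peers;
}

Encoding in streamsync_worker is the mirror image: emit the low seven bits of the count and set 0x80 while more bits remain.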
diff --git a/scan_urlencoded_query.c b/scan_urlencoded_query.c index a4f89c2..38d544a 100644 --- a/scan_urlencoded_query.c +++ b/scan_urlencoded_query.c | |||
| @@ -45,37 +45,45 @@ static const unsigned char is_unreserved[256] = { | |||
| 45 | 45 | ||
| 46 | /* Do a fast nibble to hex representation conversion */ | 46 | /* Do a fast nibble to hex representation conversion */ |
| 47 | static unsigned char fromhex(unsigned char x) { | 47 | static unsigned char fromhex(unsigned char x) { |
| 48 | x-='0'; if( x<=9) return x; | 48 | x -= '0'; |
| 49 | x&=~0x20; x-='A'-'0'; | 49 | if (x <= 9) |
| 50 | if( x<6 ) return x+10; | 50 | return x; |
| 51 | x &= ~0x20; | ||
| 52 | x -= 'A' - '0'; | ||
| 53 | if (x < 6) | ||
| 54 | return x + 10; | ||
| 51 | return 0xff; | 55 | return 0xff; |
| 52 | } | 56 | } |
| 53 | 57 | ||
| 54 | /* Skip the value of a param=value pair */ | 58 | /* Skip the value of a param=value pair */ |
| 55 | void scan_urlencoded_skipvalue( char **string ) { | 59 | void scan_urlencoded_skipvalue(char **string) { |
| 56 | const unsigned char* s=*(const unsigned char**) string; | 60 | const unsigned char *s = *(const unsigned char **)string; |
| 57 | unsigned char f; | 61 | unsigned char f; |
| 58 | 62 | ||
| 59 | /* Since we are asked to skip the 'value', we assume to stop at | 63 | /* Since we are asked to skip the 'value', we assume to stop at |
| 60 | terminators for a 'value' string position */ | 64 | terminators for a 'value' string position */ |
| 61 | while( ( f = is_unreserved[ *s++ ] ) & SCAN_SEARCHPATH_VALUE ); | 65 | while ((f = is_unreserved[*s++]) & SCAN_SEARCHPATH_VALUE) |
| 66 | ; | ||
| 62 | 67 | ||
| 63 | /* If we stopped at a hard terminator like \0 or \n, make the | 68 | /* If we stopped at a hard terminator like \0 or \n, make the |
| 64 | next scan_urlencoded_query encounter it again */ | 69 | next scan_urlencoded_query encounter it again */ |
| 65 | if( f & SCAN_SEARCHPATH_TERMINATOR ) --s; | 70 | if (f & SCAN_SEARCHPATH_TERMINATOR) |
| 71 | --s; | ||
| 66 | 72 | ||
| 67 | *string = (char*)s; | 73 | *string = (char *)s; |
| 68 | } | 74 | } |
| 69 | 75 | ||
| 70 | int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCHPATH_FLAG flags) { | 76 | int scan_find_keywords(const ot_keywords *keywords, char **string, SCAN_SEARCHPATH_FLAG flags) { |
| 71 | char *deststring = *string; | 77 | char *deststring = *string; |
| 72 | ssize_t match_length = scan_urlencoded_query(string, deststring, flags ); | 78 | ssize_t match_length = scan_urlencoded_query(string, deststring, flags); |
| 73 | 79 | ||
| 74 | if( match_length < 0 ) return match_length; | 80 | if (match_length < 0) |
| 75 | if( match_length == 0 ) return -3; | 81 | return match_length; |
| 82 | if (match_length == 0) | ||
| 83 | return -3; | ||
| 76 | 84 | ||
| 77 | while( keywords->key ) { | 85 | while (keywords->key) { |
| 78 | if( !strncmp( keywords->key, deststring, match_length ) && !keywords->key[match_length] ) | 86 | if (!strncmp(keywords->key, deststring, match_length) && !keywords->key[match_length]) |
| 79 | return keywords->value; | 87 | return keywords->value; |
| 80 | keywords++; | 88 | keywords++; |
| 81 | } | 89 | } |
| @@ -84,60 +92,73 @@ int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCH | |||
| 84 | } | 92 | } |
| 85 | 93 | ||
| 86 | ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_FLAG flags) { | 94 | ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_FLAG flags) { |
| 87 | const unsigned char* s=*(const unsigned char**) string; | 95 | const unsigned char *s = *(const unsigned char **)string; |
| 88 | unsigned char *d = (unsigned char*)deststring; | 96 | unsigned char *d = (unsigned char *)deststring; |
| 89 | unsigned char b, c; | 97 | unsigned char b, c; |
| 90 | 98 | ||
| 91 | /* This is the main decoding loop. | 99 | /* This is the main decoding loop. |
| 92 | 'flag' determines which characters are non-terminating in current context | 100 | 'flag' determines which characters are non-terminating in current context |
| 93 | (ie. stop at '=' and '&' if scanning for a 'param'; stop at '?' if scanning for the path ) | 101 | (ie. stop at '=' and '&' if scanning for a 'param'; stop at '?' if scanning for the path ) |
| 94 | */ | 102 | */ |
| 95 | while( is_unreserved[ c = *s++ ] & flags ) { | 103 | while (is_unreserved[c = *s++] & flags) { |
| 96 | 104 | ||
| 97 | /* When encountering an url escaped character, try to decode */ | 105 | /* When encountering an url escaped character, try to decode */ |
| 98 | if( c=='%') { | 106 | if (c == '%') { |
| 99 | if( ( b = fromhex(*s++) ) == 0xff ) return -1; | 107 | if ((b = fromhex(*s++)) == 0xff) |
| 100 | if( ( c = fromhex(*s++) ) == 0xff ) return -1; | 108 | return -1; |
| 101 | c|=(b<<4); | 109 | if ((c = fromhex(*s++)) == 0xff) |
| 110 | return -1; | ||
| 111 | c |= (b << 4); | ||
| 102 | } | 112 | } |
| 103 | 113 | ||
| 104 | /* Write (possibly decoded) character to output */ | 114 | /* Write (possibly decoded) character to output */ |
| 105 | *d++ = c; | 115 | *d++ = c; |
| 106 | } | 116 | } |
| 107 | 117 | ||
| 108 | switch( c ) { | 118 | switch (c) { |
| 109 | case 0: case '\r': case '\n': case ' ': | 119 | case 0: |
| 120 | case '\r': | ||
| 121 | case '\n': | ||
| 122 | case ' ': | ||
| 110 | /* If we started scanning on a hard terminator, indicate we've finished */ | 123 | /* If we started scanning on a hard terminator, indicate we've finished */ |
| 111 | if( d == (unsigned char*)deststring ) return -2; | 124 | if (d == (unsigned char *)deststring) |
| 125 | return -2; | ||
| 112 | 126 | ||
| 113 | /* Else make the next call to scan_urlencoded_param encounter it again */ | 127 | /* Else make the next call to scan_urlencoded_param encounter it again */ |
| 114 | --s; | 128 | --s; |
| 115 | break; | 129 | break; |
| 116 | case '?': | 130 | case '?': |
| 117 | if( flags != SCAN_PATH ) return -1; | 131 | if (flags != SCAN_PATH) |
| 132 | return -1; | ||
| 118 | break; | 133 | break; |
| 119 | case '=': | 134 | case '=': |
| 120 | if( flags != SCAN_SEARCHPATH_PARAM ) return -1; | 135 | if (flags != SCAN_SEARCHPATH_PARAM) |
| 136 | return -1; | ||
| 121 | break; | 137 | break; |
| 122 | case '&': | 138 | case '&': |
| 123 | if( flags == SCAN_PATH ) return -1; | 139 | if (flags == SCAN_PATH) |
| 124 | if( flags == SCAN_SEARCHPATH_PARAM ) --s; | 140 | return -1; |
| 141 | if (flags == SCAN_SEARCHPATH_PARAM) | ||
| 142 | --s; | ||
| 125 | break; | 143 | break; |
| 126 | default: | 144 | default: |
| 127 | return -1; | 145 | return -1; |
| 128 | } | 146 | } |
| 129 | 147 | ||
| 130 | *string = (char *)s; | 148 | *string = (char *)s; |
| 131 | return d - (unsigned char*)deststring; | 149 | return d - (unsigned char *)deststring; |
| 132 | } | 150 | } |
| 133 | 151 | ||
| 134 | ssize_t scan_fixed_int( char *data, size_t len, int *tmp ) { | 152 | ssize_t scan_fixed_int(char *data, size_t len, int *tmp) { |
| 135 | int minus = 0; | 153 | int minus = 0; |
| 136 | *tmp = 0; | 154 | *tmp = 0; |
| 137 | if( *data == '-' ) --len, ++data, ++minus; | 155 | if (*data == '-') |
| 138 | while( (len > 0) && (*data >= '0') && (*data <= '9') ) { --len; *tmp = 10**tmp + *data++-'0'; } | 156 | --len, ++data, ++minus; |
| 139 | if( minus ) *tmp = -*tmp; | 157 | while ((len > 0) && (*data >= '0') && (*data <= '9')) { |
| 158 | --len; | ||
| 159 | *tmp = 10 * *tmp + *data++ - '0'; | ||
| 160 | } | ||
| 161 | if (minus) | ||
| 162 | *tmp = -*tmp; | ||
| 140 | return len; | 163 | return len; |
| 141 | } | 164 | } |
| 142 | |||
| 143 | const char *g_version_scan_urlencoded_query_c = "$Source$: $Revision$\n"; | ||
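The decoder above folds lowercase onto uppercase with x &= ~0x20 and signals any non-hex character by returning 0xff, which the caller treats as a malformed escape and rejects with -1. A standalone sketch of the same nibble logic, for illustration only:

    #include <stdio.h>

    static unsigned char fromhex(unsigned char x) {
      x -= '0';
      if (x <= 9)
        return x;
      x &= ~0x20;  /* fold lowercase onto uppercase */
      x -= 'A' - '0';
      if (x < 6)
        return x + 10;
      return 0xff; /* not a hex digit */
    }

    int main(void) {
      unsigned char hi = fromhex('4'), lo = fromhex('1');
      printf("%%41 -> 0x%02x ('%c')\n", (hi << 4) | lo, (hi << 4) | lo); /* prints 0x41 ('A') */
      return 0;
    }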
diff --git a/scan_urlencoded_query.h b/scan_urlencoded_query.h index 06b91f5..74246e7 100644 --- a/scan_urlencoded_query.h +++ b/scan_urlencoded_query.h | |||
| @@ -38,18 +38,18 @@ ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_F | |||
| 38 | or -2 for terminator found | 38 | or -2 for terminator found |
| 39 | or -3 for no keyword matched | 39 | or -3 for no keyword matched |
| 40 | */ | 40 | */ |
| 41 | int scan_find_keywords( const ot_keywords * keywords, char **string, SCAN_SEARCHPATH_FLAG flags); | 41 | int scan_find_keywords(const ot_keywords *keywords, char **string, SCAN_SEARCHPATH_FLAG flags); |
| 42 | 42 | ||
| 43 | /* string in: pointer to value of a param=value pair to skip | 43 | /* string in: pointer to value of a param=value pair to skip |
| 44 | out: pointer to next scan position on return | 44 | out: pointer to next scan position on return |
| 45 | */ | 45 | */ |
| 46 | void scan_urlencoded_skipvalue( char **string ); | 46 | void scan_urlencoded_skipvalue(char **string); |
| 47 | 47 | ||
| 48 | /* data pointer to len chars of string | 48 | /* data pointer to len chars of string |
| 49 | len length of chars in data to parse | 49 | len length of chars in data to parse |
| 50 | number number to receive result | 50 | number number to receive result |
| 51 | returns number of bytes not parsed, mostly !=0 means fail | 51 | returns number of bytes not parsed, mostly !=0 means fail |
| 52 | */ | 52 | */ |
| 53 | ssize_t scan_fixed_int( char *data, size_t len, int *number ); | 53 | ssize_t scan_fixed_int(char *data, size_t len, int *number); |
| 54 | 54 | ||
| 55 | #endif | 55 | #endif |
diff --git a/tests/testsuite2.sh b/tests/testsuite2.sh index c9a5a6a..da5181b 100644 --- a/tests/testsuite2.sh +++ b/tests/testsuite2.sh | |||
| @@ -2,13 +2,21 @@ | |||
| 2 | 2 | ||
| 3 | while true; do | 3 | while true; do |
| 4 | request_string="GET /announce?info_hash=012345678901234567\ | 4 | request_string="GET /announce?info_hash=012345678901234567\ |
| 5 | %$(printf %02X $(( $RANDOM & 0xff )) )\ | 5 | $(printf %02X $(( $RANDOM & 0xff )) )\ |
| 6 | %$(printf %02X $(( $RANDOM & 0xff )) )\ | 6 | &ip=$(( $RANDOM & 0xff )).17.13.15&port=$(( $RANDOM & 0xff )) HTTP/1.0" |
| 7 | &ip=$(( $RANDOM & 0xff )).17.13.15&port=$(( $RANDOM & 0xff )) HTTP/1.0\n" | 7 | |
| 8 | 8 | # echo $request_string | |
| 9 | echo $request_string | 9 | # echo |
| 10 | echo | 10 | printf "%s\n\n" "$request_string" | nc 84.200.61.9 6969 | hexdump -C |
| 11 | echo $request_string | nc 23.23.23.237 6969 >/dev/null | 11 | |
| 12 | echo | 12 | request_string="GET /announce?info_hash=012345678901234567\ |
| 13 | $(printf %02X $(( $RANDOM & 0xff )) )\ | ||
| 14 | &ip=2001:1608:6:27::$(( $RANDOM & 0xff ))&port=$(( $RANDOM & 0xff )) HTTP/1.0" | ||
| 15 | printf "%s\n\n" "$request_string" | nc 2001:1608:6:27::9 6969 | hexdump -C | ||
| 16 | printf "%s\n\n" "$request_string" | ||
| 17 | |||
| 18 | request_string="GET /scrape?info_hash=012345678901234567\ | ||
| 19 | $(printf %02X $(( $RANDOM & 0xff )) ) HTTP/1.0" | ||
| 20 | printf "%s\n\n" "$request_string" | nc 2001:1608:6:27::9 6969 | hexdump -C | ||
| 13 | 21 | ||
| 14 | done | 22 | done |
diff --git a/trackerlogic.c b/trackerlogic.c index 719f8a2..04df544 100644 --- a/trackerlogic.c +++ b/trackerlogic.c | |||
| @@ -4,454 +4,595 @@ | |||
| 4 | $id$ */ | 4 | $id$ */ |
| 5 | 5 | ||
| 6 | /* System */ | 6 | /* System */ |
| 7 | #include <stdlib.h> | ||
| 8 | #include <string.h> | ||
| 9 | #include <stdio.h> | ||
| 10 | #include <arpa/inet.h> | 7 | #include <arpa/inet.h> |
| 11 | #include <unistd.h> | ||
| 12 | #include <errno.h> | 8 | #include <errno.h> |
| 13 | #include <stdint.h> | 9 | #include <stdint.h> |
| 10 | #include <stdio.h> | ||
| 11 | #include <stdlib.h> | ||
| 12 | #include <string.h> | ||
| 13 | #include <unistd.h> | ||
| 14 | 14 | ||
| 15 | /* Libowfat */ | 15 | /* Libowfat */ |
| 16 | #include "array.h" | ||
| 16 | #include "byte.h" | 17 | #include "byte.h" |
| 17 | #include "io.h" | 18 | #include "io.h" |
| 18 | #include "iob.h" | 19 | #include "iob.h" |
| 19 | #include "array.h" | 20 | #include "ip6.h" |
| 20 | 21 | ||
| 21 | /* Opentracker */ | 22 | /* Opentracker */ |
| 22 | #include "trackerlogic.h" | ||
| 23 | #include "ot_mutex.h" | ||
| 24 | #include "ot_stats.h" | ||
| 25 | #include "ot_clean.h" | ||
| 26 | #include "ot_http.h" | ||
| 27 | #include "ot_accesslist.h" | 23 | #include "ot_accesslist.h" |
| 24 | #include "ot_clean.h" | ||
| 28 | #include "ot_fullscrape.h" | 25 | #include "ot_fullscrape.h" |
| 26 | #include "ot_http.h" | ||
| 29 | #include "ot_livesync.h" | 27 | #include "ot_livesync.h" |
| 28 | #include "ot_mutex.h" | ||
| 29 | #include "ot_stats.h" | ||
| 30 | #include "ot_vector.h" | ||
| 31 | #include "trackerlogic.h" | ||
| 30 | 32 | ||
| 31 | /* Forward declaration */ | 33 | /* Forward declaration */ |
| 32 | size_t return_peers_for_torrent( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ); | 34 | size_t return_peers_for_torrent(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto); |
| 33 | 35 | ||
| 34 | void free_peerlist( ot_peerlist *peer_list ) { | 36 | void free_peerlist(ot_peerlist *peer_list) { |
| 35 | if( peer_list->peers.data ) { | 37 | if (peer_list->peers.data) { |
| 36 | if( OT_PEERLIST_HASBUCKETS( peer_list ) ) { | 38 | if (OT_PEERLIST_HASBUCKETS(peer_list)) |
| 37 | ot_vector *bucket_list = (ot_vector*)(peer_list->peers.data); | 39 | vector_clean_list((ot_vector *)peer_list->peers.data, peer_list->peers.size); |
| 38 | 40 | else | |
| 39 | while( peer_list->peers.size-- ) | 41 | free(peer_list->peers.data); |
| 40 | free( bucket_list++->data ); | ||
| 41 | } | ||
| 42 | free( peer_list->peers.data ); | ||
| 43 | } | 42 | } |
| 44 | free( peer_list ); | 43 | free(peer_list); |
| 45 | } | 44 | } |
| 46 | 45 | ||
| 47 | void add_torrent_from_saved_state( ot_hash hash, ot_time base, size_t down_count ) { | 46 | void add_torrent_from_saved_state(ot_hash const hash, ot_time base, size_t down_count) { |
| 48 | int exactmatch; | 47 | int exactmatch; |
| 49 | ot_torrent *torrent; | 48 | ot_torrent *torrent; |
| 50 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); | 49 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash); |
| 51 | 50 | ||
| 52 | if( !accesslist_hashisvalid( hash ) ) | 51 | if (!accesslist_hashisvalid(hash)) |
| 53 | return mutex_bucket_unlock_by_hash( hash, 0 ); | 52 | return mutex_bucket_unlock_by_hash(hash, 0); |
| 54 | 53 | ||
| 55 | torrent = vector_find_or_insert( torrents_list, (void*)hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 54 | torrent = vector_find_or_insert(torrents_list, (void *)hash, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch); |
| 56 | if( !torrent || exactmatch ) | 55 | if (!torrent || exactmatch) |
| 57 | return mutex_bucket_unlock_by_hash( hash, 0 ); | 56 | return mutex_bucket_unlock_by_hash(hash, 0); |
| 58 | 57 | ||
| 59 | /* Create a new torrent entry, then */ | 58 | /* Create a new torrent entry, then */ |
| 60 | memcpy( torrent->hash, hash, sizeof(ot_hash) ); | 59 | byte_zero(torrent, sizeof(ot_torrent)); |
| 60 | memcpy(torrent->hash, hash, sizeof(ot_hash)); | ||
| 61 | 61 | ||
| 62 | if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) { | 62 | if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) { |
| 63 | vector_remove_torrent( torrents_list, torrent ); | 63 | vector_remove_torrent(torrents_list, torrent); |
| 64 | return mutex_bucket_unlock_by_hash( hash, 0 ); | 64 | return mutex_bucket_unlock_by_hash(hash, 0); |
| 65 | } | 65 | } |
| 66 | 66 | ||
| 67 | byte_zero( torrent->peer_list, sizeof( ot_peerlist ) ); | 67 | byte_zero(torrent->peer_list6, sizeof(ot_peerlist)); |
| 68 | torrent->peer_list->base = base; | 68 | byte_zero(torrent->peer_list4, sizeof(ot_peerlist)); |
| 69 | torrent->peer_list->down_count = down_count; | 69 | torrent->peer_list6->base = base; |
| 70 | torrent->peer_list4->base = base; | ||
| 71 | torrent->peer_list6->down_count = down_count; | ||
| 72 | torrent->peer_list4->down_count = down_count; | ||
| 70 | 73 | ||
| 71 | return mutex_bucket_unlock_by_hash( hash, 1 ); | 74 | return mutex_bucket_unlock_by_hash(hash, 1); |
| 72 | } | 75 | } |
| 73 | 76 | ||
| 74 | size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount ) { | 77 | size_t add_peer_to_torrent_and_return_peers(PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount) { |
| 75 | int exactmatch, delta_torrentcount = 0; | 78 | int exactmatch, delta_torrentcount = 0; |
| 76 | ot_torrent *torrent; | 79 | ot_torrent *torrent; |
| 77 | ot_peer *peer_dest; | 80 | ot_peer *peer_dest; |
| 78 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( *ws->hash ); | 81 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(*ws->hash); |
| 79 | 82 | ot_peerlist *peer_list; | |
| 80 | if( !accesslist_hashisvalid( *ws->hash ) ) { | 83 | size_t peer_size; /* initialized in next line */ |
| 81 | mutex_bucket_unlock_by_hash( *ws->hash, 0 ); | 84 | ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size); |
| 82 | if( proto == FLAG_TCP ) { | 85 | |
| 86 | if (!accesslist_hashisvalid(*ws->hash)) { | ||
| 87 | mutex_bucket_unlock_by_hash(*ws->hash, 0); | ||
| 88 | if (proto == FLAG_TCP) { | ||
| 83 | const char invalid_hash[] = "d14:failure reason63:Requested download is not authorized for use with this tracker.e"; | 89 | const char invalid_hash[] = "d14:failure reason63:Requested download is not authorized for use with this tracker.e"; |
| 84 | memcpy( ws->reply, invalid_hash, strlen( invalid_hash ) ); | 90 | memcpy(ws->reply, invalid_hash, strlen(invalid_hash)); |
| 85 | return strlen( invalid_hash ); | 91 | return strlen(invalid_hash); |
| 86 | } | 92 | } |
| 87 | return 0; | 93 | return 0; |
| 88 | } | 94 | } |
| 89 | 95 | ||
| 90 | torrent = vector_find_or_insert( torrents_list, (void*)ws->hash, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 96 | torrent = vector_find_or_insert(torrents_list, (void *)ws->hash, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch); |
| 91 | if( !torrent ) { | 97 | if (!torrent) { |
| 92 | mutex_bucket_unlock_by_hash( *ws->hash, 0 ); | 98 | mutex_bucket_unlock_by_hash(*ws->hash, 0); |
| 93 | return 0; | 99 | return 0; |
| 94 | } | 100 | } |
| 95 | 101 | ||
| 96 | if( !exactmatch ) { | 102 | if (!exactmatch) { |
| 97 | /* Create a new torrent entry, then */ | 103 | /* Create a new torrent entry, then */ |
| 98 | memcpy( torrent->hash, *ws->hash, sizeof(ot_hash) ); | 104 | byte_zero(torrent, sizeof(ot_torrent)); |
| 105 | memcpy(torrent->hash, *ws->hash, sizeof(ot_hash)); | ||
| 99 | 106 | ||
| 100 | if( !( torrent->peer_list = malloc( sizeof (ot_peerlist) ) ) ) { | 107 | if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) { |
| 101 | vector_remove_torrent( torrents_list, torrent ); | 108 | vector_remove_torrent(torrents_list, torrent); |
| 102 | mutex_bucket_unlock_by_hash( *ws->hash, 0 ); | 109 | mutex_bucket_unlock_by_hash(*ws->hash, 0); |
| 103 | return 0; | 110 | return 0; |
| 104 | } | 111 | } |
| 105 | 112 | ||
| 106 | byte_zero( torrent->peer_list, sizeof( ot_peerlist ) ); | 113 | byte_zero(torrent->peer_list6, sizeof(ot_peerlist)); |
| 114 | byte_zero(torrent->peer_list4, sizeof(ot_peerlist)); | ||
| 107 | delta_torrentcount = 1; | 115 | delta_torrentcount = 1; |
| 108 | } else | 116 | } else |
| 109 | clean_single_torrent( torrent ); | 117 | clean_single_torrent(torrent); |
| 110 | 118 | ||
| 111 | torrent->peer_list->base = g_now_minutes; | 119 | torrent->peer_list6->base = g_now_minutes; |
| 120 | torrent->peer_list4->base = g_now_minutes; | ||
| 121 | |||
| 122 | peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; | ||
| 112 | 123 | ||
| 113 | /* Check for peer in torrent */ | 124 | /* Check for peer in torrent */ |
| 114 | peer_dest = vector_find_or_insert_peer( &(torrent->peer_list->peers), &ws->peer, &exactmatch ); | 125 | peer_dest = vector_find_or_insert_peer(&(peer_list->peers), peer_src, peer_size, &exactmatch); |
| 115 | if( !peer_dest ) { | 126 | if (!peer_dest) { |
| 116 | mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount ); | 127 | mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount); |
| 117 | return 0; | 128 | return 0; |
| 118 | } | 129 | } |
| 119 | 130 | ||
| 120 | /* Tell peer that it's fresh */ | 131 | /* Tell peer that it's fresh */ |
| 121 | OT_PEERTIME( &ws->peer ) = 0; | 132 | OT_PEERTIME(ws->peer, OT_PEER_SIZE6) = 0; |
| 122 | 133 | ||
| 123 | /* Sanitize flags: Whoever claims to have completed a download must be a seeder */ | 134 | /* Sanitize flags: Whoever claims to have completed a download must be a seeder */ |
| 124 | if( ( OT_PEERFLAG( &ws->peer ) & ( PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING ) ) == PEER_FLAG_COMPLETED ) | 135 | if ((OT_PEERFLAG(ws->peer) & (PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING)) == PEER_FLAG_COMPLETED) |
| 125 | OT_PEERFLAG( &ws->peer ) ^= PEER_FLAG_COMPLETED; | 136 | OT_PEERFLAG(ws->peer) ^= PEER_FLAG_COMPLETED; |
| 126 | 137 | ||
| 127 | /* If we hadn't had a match create peer there */ | 138 | /* If we hadn't had a match create peer there */ |
| 128 | if( !exactmatch ) { | 139 | if (!exactmatch) { |
| 129 | 140 | ||
| 130 | #ifdef WANT_SYNC_LIVE | 141 | #ifdef WANT_SYNC_LIVE |
| 131 | if( proto == FLAG_MCA ) | 142 | if (proto == FLAG_MCA) |
| 132 | OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_FROM_SYNC; | 143 | OT_PEERFLAG(ws->peer) |= PEER_FLAG_FROM_SYNC; |
| 133 | else | 144 | else |
| 134 | livesync_tell( ws ); | 145 | livesync_tell(ws); |
| 135 | #endif | 146 | #endif |
| 136 | 147 | ||
| 137 | torrent->peer_list->peer_count++; | 148 | peer_list->peer_count++; |
| 138 | if( OT_PEERFLAG(&ws->peer) & PEER_FLAG_COMPLETED ) { | 149 | if (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED) { |
| 139 | torrent->peer_list->down_count++; | 150 | peer_list->down_count++; |
| 140 | stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)ws ); | 151 | stats_issue_event(EVENT_COMPLETED, 0, (uintptr_t)ws); |
| 141 | } | 152 | } |
| 142 | if( OT_PEERFLAG(&ws->peer) & PEER_FLAG_SEEDING ) | 153 | if (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING) |
| 143 | torrent->peer_list->seed_count++; | 154 | peer_list->seed_count++; |
| 144 | 155 | ||
| 145 | } else { | 156 | } else { |
| 146 | stats_issue_event( EVENT_RENEW, 0, OT_PEERTIME( peer_dest ) ); | 157 | stats_issue_event(EVENT_RENEW, 0, OT_PEERTIME(peer_dest, peer_size)); |
| 147 | #ifdef WANT_SPOT_WOODPECKER | 158 | #ifdef WANT_SPOT_WOODPECKER |
| 148 | if( ( OT_PEERTIME(peer_dest) > 0 ) && ( OT_PEERTIME(peer_dest) < 20 ) ) | 159 | if ((OT_PEERTIME(peer_dest, peer_size) > 0) && (OT_PEERTIME(peer_dest, peer_size) < 20)) |
| 149 | stats_issue_event( EVENT_WOODPECKER, 0, (uintptr_t)&ws->peer ); | 160 | stats_issue_event(EVENT_WOODPECKER, 0, (uintptr_t)&ws->peer); |
| 150 | #endif | 161 | #endif |
| 151 | #ifdef WANT_SYNC_LIVE | 162 | #ifdef WANT_SYNC_LIVE |
| 152 | /* Won't live sync peers that come back too fast. Only exception: | 163 | /* Won't live sync peers that come back too fast. Only exception: |
| 153 | fresh "completed" reports */ | 164 | fresh "completed" reports */ |
| 154 | if( proto != FLAG_MCA ) { | 165 | if (proto != FLAG_MCA) { |
| 155 | if( OT_PEERTIME( peer_dest ) > OT_CLIENT_SYNC_RENEW_BOUNDARY || | 166 | if (OT_PEERTIME(peer_dest, peer_size) > OT_CLIENT_SYNC_RENEW_BOUNDARY || |
| 156 | ( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(&ws->peer) & PEER_FLAG_COMPLETED ) ) ) | 167 | (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED))) |
| 157 | livesync_tell( ws ); | 168 | livesync_tell(ws); |
| 158 | } | 169 | } |
| 159 | #endif | 170 | #endif |
| 160 | 171 | ||
| 161 | if( (OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && !(OT_PEERFLAG(&ws->peer) & PEER_FLAG_SEEDING ) ) | 172 | if ((OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING) && !(OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING)) |
| 162 | torrent->peer_list->seed_count--; | 173 | peer_list->seed_count--; |
| 163 | if( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_SEEDING ) && (OT_PEERFLAG(&ws->peer) & PEER_FLAG_SEEDING ) ) | 174 | if (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING)) |
| 164 | torrent->peer_list->seed_count++; | 175 | peer_list->seed_count++; |
| 165 | if( !(OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) && (OT_PEERFLAG(&ws->peer) & PEER_FLAG_COMPLETED ) ) { | 176 | if (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED)) { |
| 166 | torrent->peer_list->down_count++; | 177 | peer_list->down_count++; |
| 167 | stats_issue_event( EVENT_COMPLETED, 0, (uintptr_t)ws ); | 178 | stats_issue_event(EVENT_COMPLETED, 0, (uintptr_t)ws); |
| 168 | } | 179 | } |
| 169 | if( OT_PEERFLAG(peer_dest) & PEER_FLAG_COMPLETED ) | 180 | if (OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) |
| 170 | OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_COMPLETED; | 181 | OT_PEERFLAG(ws->peer) |= PEER_FLAG_COMPLETED; |
| 171 | } | 182 | } |
| 172 | 183 | ||
| 173 | memcpy( peer_dest, &ws->peer, sizeof(ot_peer) ); | 184 | memcpy(peer_dest, peer_src, peer_size); |
| 174 | #ifdef WANT_SYNC | 185 | #ifdef WANT_SYNC |
| 175 | if( proto == FLAG_MCA ) { | 186 | if (proto == FLAG_MCA) { |
| 176 | mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount ); | 187 | mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount); |
| 177 | return 0; | 188 | return 0; |
| 178 | } | 189 | } |
| 179 | #endif | 190 | #endif |
| 180 | 191 | ||
| 181 | ws->reply_size = return_peers_for_torrent( ws, torrent, amount, ws->reply, proto ); | 192 | ws->reply_size = return_peers_for_torrent(ws, torrent, amount, ws->reply, proto); |
| 182 | mutex_bucket_unlock_by_hash( *ws->hash, delta_torrentcount ); | 193 | mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount); |
| 183 | return ws->reply_size; | 194 | return ws->reply_size; |
| 184 | } | 195 | } |
| 185 | 196 | ||
| 186 | static size_t return_peers_all( ot_peerlist *peer_list, char *reply ) { | 197 | static size_t return_peers_all(ot_peerlist *peer_list, size_t peer_size, char *reply) { |
| 187 | unsigned int bucket, num_buckets = 1; | 198 | unsigned int bucket, num_buckets = 1; |
| 188 | ot_vector * bucket_list = &peer_list->peers; | 199 | ot_vector *bucket_list = &peer_list->peers; |
| 189 | size_t result = OT_PEER_COMPARE_SIZE * peer_list->peer_count; | 200 | size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); |
| 190 | char * r_end = reply + result; | 201 | size_t result = compare_size * peer_list->peer_count; |
| 202 | char *r_end = reply + result; | ||
| 191 | 203 | ||
| 192 | if( OT_PEERLIST_HASBUCKETS(peer_list) ) { | 204 | if (OT_PEERLIST_HASBUCKETS(peer_list)) { |
| 193 | num_buckets = bucket_list->size; | 205 | num_buckets = bucket_list->size; |
| 194 | bucket_list = (ot_vector *)bucket_list->data; | 206 | bucket_list = (ot_vector *)bucket_list->data; |
| 195 | } | 207 | } |
| 196 | 208 | ||
| 197 | for( bucket = 0; bucket<num_buckets; ++bucket ) { | 209 | for (bucket = 0; bucket < num_buckets; ++bucket) { |
| 198 | ot_peer * peers = (ot_peer*)bucket_list[bucket].data; | 210 | ot_peer *peers = bucket_list[bucket].data; |
| 199 | size_t peer_count = bucket_list[bucket].size; | 211 | size_t peer_count = bucket_list[bucket].size; |
| 200 | while( peer_count-- ) { | 212 | while (peer_count--) { |
| 201 | if( OT_PEERFLAG(peers) & PEER_FLAG_SEEDING ) { | 213 | if (OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING) { |
| 202 | r_end-=OT_PEER_COMPARE_SIZE; | 214 | r_end -= compare_size; |
| 203 | memcpy(r_end,peers++,OT_PEER_COMPARE_SIZE); | 215 | memcpy(r_end, peers, compare_size); |
| 204 | } else { | 216 | } else { |
| 205 | memcpy(reply,peers++,OT_PEER_COMPARE_SIZE); | 217 | memcpy(reply, peers, compare_size); |
| 206 | reply+=OT_PEER_COMPARE_SIZE; | 218 | reply += compare_size; |
| 207 | } | 219 | } |
| 220 | peers += peer_size; | ||
| 208 | } | 221 | } |
| 209 | } | 222 | } |
| 210 | return result; | 223 | return result; |
| 211 | } | 224 | } |
| 212 | 225 | ||
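return_peers_all above fills the reply from both ends: leechers are appended at the front cursor while seeders are written backwards from r_end, so the buffer comes out exactly compare_size * peer_count bytes with seeders grouped at the tail and no second pass needed. A tiny standalone sketch of that two-cursor idea (using an int array in place of peer records, for illustration only):

    #include <stddef.h>

    /* Partition into "head" and "tail" groups in one pass by writing from both ends,
       the same trick return_peers_all uses for leechers vs. seeders. */
    static void pack_two_ended(const int *in, size_t n, int *out) {
      int   *front = out, *back = out + n;
      size_t i;
      for (i = 0; i < n; ++i) {
        if (in[i] % 2 == 0)
          *--back = in[i];  /* "seeder": grows from the end */
        else
          *front++ = in[i]; /* "leecher": grows from the start */
      }
    }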
| 213 | static size_t return_peers_selection( struct ot_workstruct *ws, ot_peerlist *peer_list, size_t amount, char *reply ) { | 226 | static size_t return_peers_selection(struct ot_workstruct *ws, ot_peerlist *peer_list, size_t peer_size, size_t amount, char *reply) { |
| 214 | unsigned int bucket_offset, bucket_index = 0, num_buckets = 1; | 227 | unsigned int bucket_offset, bucket_index = 0, num_buckets = 1; |
| 215 | ot_vector * bucket_list = &peer_list->peers; | 228 | ot_vector *bucket_list = &peer_list->peers; |
| 216 | unsigned int shifted_pc = peer_list->peer_count; | 229 | unsigned int shifted_pc = peer_list->peer_count; |
| 217 | unsigned int shifted_step = 0; | 230 | unsigned int shifted_step = 0; |
| 218 | unsigned int shift = 0; | 231 | unsigned int shift = 0; |
| 219 | size_t result = OT_PEER_COMPARE_SIZE * amount; | 232 | size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size); |
| 220 | char * r_end = reply + result; | 233 | size_t result = compare_size * amount; |
| 234 | char *r_end = reply + result; | ||
| 221 | 235 | ||
| 222 | if( OT_PEERLIST_HASBUCKETS(peer_list) ) { | 236 | if (OT_PEERLIST_HASBUCKETS(peer_list)) { |
| 223 | num_buckets = bucket_list->size; | 237 | num_buckets = bucket_list->size; |
| 224 | bucket_list = (ot_vector *)bucket_list->data; | 238 | bucket_list = (ot_vector *)bucket_list->data; |
| 225 | } | 239 | } |
| 226 | 240 | ||
| 227 | /* Make fixpoint arithmetic as exact as possible */ | 241 | /* Make fixpoint arithmetic as exact as possible */ |
| 228 | #define MAXPRECBIT (1<<(8*sizeof(int)-3)) | 242 | #define MAXPRECBIT (1 << (8 * sizeof(int) - 3)) |
| 229 | while( !(shifted_pc & MAXPRECBIT ) ) { shifted_pc <<= 1; shift++; } | 243 | while (!(shifted_pc & MAXPRECBIT)) { |
| 230 | shifted_step = shifted_pc/amount; | 244 | shifted_pc <<= 1; |
| 245 | shift++; | ||
| 246 | } | ||
| 247 | shifted_step = shifted_pc / amount; | ||
| 231 | #undef MAXPRECBIT | 248 | #undef MAXPRECBIT |
| 232 | 249 | ||
| 233 | /* Initialize somewhere in the middle of peers so that | 250 | /* Initialize somewhere in the middle of peers so that |
| 234 | fixpoint's aliasing doesn't alway miss the same peers */ | 251 | fixpoint's aliasing doesn't alway miss the same peers */ |
| 235 | bucket_offset = nrand48(ws->rand48_state) % peer_list->peer_count; | 252 | bucket_offset = nrand48(ws->rand48_state) % peer_list->peer_count; |
| 236 | 253 | ||
| 237 | while( amount-- ) { | 254 | while (amount--) { |
| 238 | ot_peer * peer; | 255 | ot_peer *peer; |
| 239 | 256 | ||
| 240 | /* This is the aliased, non shifted range, next value may fall into */ | 257 | /* This is the aliased, non shifted range, next value may fall into */ |
| 241 | unsigned int diff = ( ( ( amount + 1 ) * shifted_step ) >> shift ) - | 258 | unsigned int diff = (((amount + 1) * shifted_step) >> shift) - ((amount * shifted_step) >> shift); |
| 242 | ( ( amount * shifted_step ) >> shift ); | 259 | bucket_offset += 1 + nrand48(ws->rand48_state) % diff; |
| 243 | bucket_offset += 1 + nrand48(ws->rand48_state) % diff; | ||
| 244 | 260 | ||
| 245 | while( bucket_offset >= bucket_list[bucket_index].size ) { | 261 | while (bucket_offset >= bucket_list[bucket_index].size) { |
| 246 | bucket_offset -= bucket_list[bucket_index].size; | 262 | bucket_offset -= bucket_list[bucket_index].size; |
| 247 | bucket_index = ( bucket_index + 1 ) % num_buckets; | 263 | bucket_index = (bucket_index + 1) % num_buckets; |
| 248 | } | 264 | } |
| 249 | peer = ((ot_peer*)bucket_list[bucket_index].data) + bucket_offset; | 265 | peer = bucket_list[bucket_index].data + peer_size * bucket_offset; |
| 250 | if( OT_PEERFLAG(peer) & PEER_FLAG_SEEDING ) { | 266 | if (OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_SEEDING) { |
| 251 | r_end-=OT_PEER_COMPARE_SIZE; | 267 | r_end -= compare_size; |
| 252 | memcpy(r_end,peer,OT_PEER_COMPARE_SIZE); | 268 | memcpy(r_end, peer, compare_size); |
| 253 | } else { | 269 | } else { |
| 254 | memcpy(reply,peer,OT_PEER_COMPARE_SIZE); | 270 | memcpy(reply, peer, compare_size); |
| 255 | reply+=OT_PEER_COMPARE_SIZE; | 271 | reply += compare_size; |
| 256 | } | 272 | } |
| 257 | } | 273 | } |
| 258 | return result; | 274 | return result; |
| 259 | } | 275 | } |
| 260 | 276 | ||
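The MAXPRECBIT loop above scales peer_count up until its top bit sits just below the sign bit, so shifted_step keeps as many fractional bits as possible; each iteration then derives the width of the next slot from the difference of two right-shifted products, which stops truncation error from accumulating across picks. A small worked example (peer_count = 1000, amount = 60, 32-bit int assumed), for illustration only:

    #include <stdio.h>

    int main(void) {
      unsigned int shifted_pc = 1000, shift = 0, amount = 60, k;
      unsigned int shifted_step;

      while (!(shifted_pc & (1u << 29))) { /* MAXPRECBIT for a 32-bit int */
        shifted_pc <<= 1;
        shift++;
      }                                    /* shift ends up at 20 here */
      shifted_step = shifted_pc / amount;  /* 17476266, about 16.67 peers per pick with 20 fractional bits */

      for (k = amount; k > 0; --k)         /* slot widths come out as 16 or 17, summing to 999 */
        printf("%u ", ((k * shifted_step) >> shift) - (((k - 1) * shifted_step) >> shift));
      printf("\n");
      return 0;
    }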
| 261 | /* Compiles a list of random peers for a torrent | 277 | static size_t return_peers_for_torrent_udp(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply) { |
| 262 | * reply must have enough space to hold 92+6*amount bytes | 278 | char *r = reply; |
| 263 | * does not yet check not to return self | 279 | size_t peer_size = peer_size_from_peer6(&ws->peer); |
| 264 | */ | 280 | ot_peerlist *peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; |
| 265 | size_t return_peers_for_torrent( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto ) { | 281 | size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; |
| 266 | ot_peerlist *peer_list = torrent->peer_list; | 282 | size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; |
| 267 | char *r = reply; | 283 | |
| 268 | 284 | if (amount > peer_list->peer_count) | |
| 269 | if( amount > peer_list->peer_count ) | ||
| 270 | amount = peer_list->peer_count; | 285 | amount = peer_list->peer_count; |
| 271 | 286 | ||
| 272 | if( proto == FLAG_TCP ) { | 287 | *(uint32_t *)(r + 0) = htonl(OT_CLIENT_REQUEST_INTERVAL_RANDOM); |
| 273 | int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; | 288 | *(uint32_t *)(r + 4) = htonl(peer_count - seed_count); |
| 274 | r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie" PEERS_BENCODED "%zd:", peer_list->seed_count, peer_list->down_count, peer_list->peer_count-peer_list->seed_count, erval, erval/2, OT_PEER_COMPARE_SIZE*amount ); | 289 | *(uint32_t *)(r + 8) = htonl(seed_count); |
| 275 | } else { | 290 | r += 12; |
| 276 | *(uint32_t*)(r+0) = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM ); | 291 | |
| 277 | *(uint32_t*)(r+4) = htonl( peer_list->peer_count - peer_list->seed_count ); | 292 | if (amount) { |
| 278 | *(uint32_t*)(r+8) = htonl( peer_list->seed_count ); | 293 | if (amount == peer_list->peer_count) |
| 279 | r += 12; | 294 | r += return_peers_all(peer_list, peer_size, r); |
| 295 | else | ||
| 296 | r += return_peers_selection(ws, peer_list, peer_size, amount, r); | ||
| 297 | } | ||
| 298 | return r - reply; | ||
| 299 | } | ||
| 300 | |||
| 301 | static size_t return_peers_for_torrent_tcp(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply) { | ||
| 302 | char *r = reply; | ||
| 303 | int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; | ||
| 304 | size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; | ||
| 305 | size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count; | ||
| 306 | size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - seed_count; | ||
| 307 | |||
| 308 | /* Simple case: amount of peers in both lists is less than requested, here we return all results */ | ||
| 309 | size_t amount_v4 = torrent->peer_list4->peer_count; | ||
| 310 | size_t amount_v6 = torrent->peer_list6->peer_count; | ||
| 311 | |||
| 312 | /* Complex case: both lists have more than enough entries and we need to split between v4 and v6 clients */ | ||
| 313 | if (amount_v4 + amount_v6 > amount) { | ||
| 314 | size_t amount_left, percent_v6 = 0, percent_v4 = 0, left_v6, left_v4; | ||
| 315 | const size_t SCALE = 1024; | ||
| 316 | |||
| 317 | /* If possible, fill at least a quarter of the peers from each family */ | ||
| 318 | if (amount / 4 <= amount_v4) | ||
| 319 | amount_v4 = amount / 4; | ||
| 320 | if (amount / 4 <= amount_v6) | ||
| 321 | amount_v6 = amount / 4; | ||
| 322 | |||
| 323 | /* Fill the rest according to which family's pool provides more peers */ | ||
| 324 | amount_left = amount - (amount_v4 + amount_v6); | ||
| 325 | |||
| 326 | left_v4 = torrent->peer_list4->peer_count - amount_v4; | ||
| 327 | left_v6 = torrent->peer_list6->peer_count - amount_v6; | ||
| 328 | |||
| 329 | if (left_v4 + left_v6) { | ||
| 330 | percent_v4 = (SCALE * left_v4) / (left_v4 + left_v6); | ||
| 331 | percent_v6 = (SCALE * left_v6) / (left_v4 + left_v6); | ||
| 332 | } | ||
| 333 | |||
| 334 | amount_v4 += (amount_left * percent_v4) / SCALE; | ||
| 335 | amount_v6 += (amount_left * percent_v6) / SCALE; | ||
| 336 | |||
| 337 | /* Integer division rounding can leave out a peer */ | ||
| 338 | if (amount_v4 + amount_v6 < amount && amount_v6 < torrent->peer_list6->peer_count) | ||
| 339 | ++amount_v6; | ||
| 340 | if (amount_v4 + amount_v6 < amount && amount_v4 < torrent->peer_list4->peer_count) | ||
| 341 | ++amount_v4; | ||
| 280 | } | 342 | } |
| 281 | 343 | ||
| 282 | if( amount ) { | 344 | r += |
| 283 | if( amount == peer_list->peer_count ) | 345 | sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie", seed_count, down_count, peer_count, erval, erval / 2); |
| 284 | r += return_peers_all( peer_list, r ); | 346 | |
| 347 | if (amount_v4) { | ||
| 348 | r += sprintf(r, PEERS_BENCODED4 "%zd:", OT_PEER_COMPARE_SIZE4 * amount_v4); | ||
| 349 | if (amount_v4 == torrent->peer_list4->peer_count) | ||
| 350 | r += return_peers_all(torrent->peer_list4, OT_PEER_SIZE4, r); | ||
| 351 | else | ||
| 352 | r += return_peers_selection(ws, torrent->peer_list4, OT_PEER_SIZE4, amount_v4, r); | ||
| 353 | } | ||
| 354 | |||
| 355 | if (amount_v6) { | ||
| 356 | r += sprintf(r, PEERS_BENCODED6 "%zd:", OT_PEER_COMPARE_SIZE6 * amount_v6); | ||
| 357 | if (amount_v6 == torrent->peer_list6->peer_count) | ||
| 358 | r += return_peers_all(torrent->peer_list6, OT_PEER_SIZE6, r); | ||
| 285 | else | 359 | else |
| 286 | r += return_peers_selection( ws, peer_list, amount, r ); | 360 | r += return_peers_selection(ws, torrent->peer_list6, OT_PEER_SIZE6, amount_v6, r); |
| 287 | } | 361 | } |
| 288 | 362 | ||
| 289 | if( proto == FLAG_TCP ) | 363 | *r++ = 'e'; |
| 290 | *r++ = 'e'; | ||
| 291 | 364 | ||
| 292 | return r - reply; | 365 | return r - reply; |
| 293 | } | 366 | } |
| 294 | 367 | ||
| 368 | /* Compiles a list of random peers for a torrent | ||
| 369 | * Reply must have enough space to hold: | ||
| 370 | * 92 + 6 * amount bytes for TCP/IPv4 | ||
| 371 | * 92 + 18 * amount bytes for TCP/IPv6 | ||
| 372 | * 12 + 6 * amount bytes for UDP/IPv4 | ||
| 373 | * 12 + 18 * amount bytes for UDP/IPv6 | ||
| 374 | * Does not yet check not to return self | ||
| 375 | */ | ||
| 376 | size_t return_peers_for_torrent(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto) { | ||
| 377 | return proto == FLAG_TCP ? return_peers_for_torrent_tcp(ws, torrent, amount, reply) : return_peers_for_torrent_udp(ws, torrent, amount, reply); | ||
| 378 | } | ||
| 379 | |||
| 295 | /* Fetches scrape info for a specific torrent */ | 380 | /* Fetches scrape info for a specific torrent */ |
| 296 | size_t return_udp_scrape_for_torrent( ot_hash hash, char *reply ) { | 381 | size_t return_udp_scrape_for_torrent(ot_hash const hash, char *reply) { |
| 297 | int exactmatch, delta_torrentcount = 0; | 382 | int exactmatch, delta_torrentcount = 0; |
| 298 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( hash ); | 383 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(hash); |
| 299 | ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 384 | ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch); |
| 300 | 385 | ||
| 301 | if( !exactmatch ) { | 386 | if (!exactmatch) { |
| 302 | memset( reply, 0, 12); | 387 | memset(reply, 0, 12); |
| 303 | } else { | 388 | } else { |
| 304 | uint32_t *r = (uint32_t*) reply; | 389 | uint32_t *r = (uint32_t *)reply; |
| 305 | 390 | ||
| 306 | if( clean_single_torrent( torrent ) ) { | 391 | if (clean_single_torrent(torrent)) { |
| 307 | vector_remove_torrent( torrents_list, torrent ); | 392 | vector_remove_torrent(torrents_list, torrent); |
| 308 | memset( reply, 0, 12); | 393 | memset(reply, 0, 12); |
| 309 | delta_torrentcount = -1; | 394 | delta_torrentcount = -1; |
| 310 | } else { | 395 | } else { |
| 311 | r[0] = htonl( torrent->peer_list->seed_count ); | 396 | r[0] = htonl(torrent->peer_list6->seed_count + torrent->peer_list4->seed_count); |
| 312 | r[1] = htonl( torrent->peer_list->down_count ); | 397 | r[1] = htonl(torrent->peer_list6->down_count + torrent->peer_list4->down_count); |
| 313 | r[2] = htonl( torrent->peer_list->peer_count-torrent->peer_list->seed_count ); | 398 | r[2] = htonl(torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - torrent->peer_list6->seed_count - torrent->peer_list4->seed_count); |
| 314 | } | 399 | } |
| 315 | } | 400 | } |
| 316 | mutex_bucket_unlock_by_hash( hash, delta_torrentcount ); | 401 | mutex_bucket_unlock_by_hash(hash, delta_torrentcount); |
| 317 | return 12; | 402 | return 12; |
| 318 | } | 403 | } |
| 319 | 404 | ||
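Each UDP scrape answer produced above is a fixed 12-byte record: three 32-bit counters in network byte order, in the order seeders, completed downloads, leechers (all zero when the torrent is unknown or has just been cleaned away). A minimal reader sketch, for illustration only:

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Decode one 12-byte record written by return_udp_scrape_for_torrent. */
    static void print_scrape_record(const char *reply) {
      uint32_t v[3];
      memcpy(v, reply, sizeof(v));
      printf("seeders=%u completed=%u leechers=%u\n", ntohl(v[0]), ntohl(v[1]), ntohl(v[2]));
    }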
| 320 | /* Fetches scrape info for a specific torrent */ | 405 | /* Fetches scrape info for a specific torrent */ |
| 321 | size_t return_tcp_scrape_for_torrent( ot_hash *hash_list, int amount, char *reply ) { | 406 | size_t return_tcp_scrape_for_torrent(ot_hash const *hash_list, int amount, char *reply) { |
| 322 | char *r = reply; | 407 | char *r = reply; |
| 323 | int exactmatch, i; | 408 | int exactmatch, i; |
| 324 | 409 | ||
| 325 | r += sprintf( r, "d5:filesd" ); | 410 | r += sprintf(r, "d5:filesd"); |
| 326 | 411 | ||
| 327 | for( i=0; i<amount; ++i ) { | 412 | for (i = 0; i < amount; ++i) { |
| 328 | int delta_torrentcount = 0; | 413 | int delta_torrentcount = 0; |
| 329 | ot_hash *hash = hash_list + i; | 414 | ot_hash const *hash = hash_list + i; |
| 330 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( *hash ); | 415 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(*hash); |
| 331 | ot_torrent *torrent = binary_search( hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 416 | ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch); |
| 332 | 417 | ||
| 333 | if( exactmatch ) { | 418 | if (exactmatch) { |
| 334 | if( clean_single_torrent( torrent ) ) { | 419 | if (clean_single_torrent(torrent)) { |
| 335 | vector_remove_torrent( torrents_list, torrent ); | 420 | vector_remove_torrent(torrents_list, torrent); |
| 336 | delta_torrentcount = -1; | 421 | delta_torrentcount = -1; |
| 337 | } else { | 422 | } else { |
| 338 | *r++='2';*r++='0';*r++=':'; | 423 | *r++ = '2'; |
| 339 | memcpy( r, hash, sizeof(ot_hash) ); r+=sizeof(ot_hash); | 424 | *r++ = '0'; |
| 340 | r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", | 425 | *r++ = ':'; |
| 341 | torrent->peer_list->seed_count, torrent->peer_list->down_count, torrent->peer_list->peer_count-torrent->peer_list->seed_count ); | 426 | memcpy(r, hash, sizeof(ot_hash)); |
| 427 | r += sizeof(ot_hash); | ||
| 428 | r += sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", torrent->peer_list6->seed_count + torrent->peer_list4->seed_count, | ||
| 429 | torrent->peer_list6->down_count + torrent->peer_list4->down_count, | ||
| 430 | torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - torrent->peer_list6->seed_count - torrent->peer_list4->seed_count); | ||
| 342 | } | 431 | } |
| 343 | } | 432 | } |
| 344 | mutex_bucket_unlock_by_hash( *hash, delta_torrentcount ); | 433 | mutex_bucket_unlock_by_hash(*hash, delta_torrentcount); |
| 345 | } | 434 | } |
| 346 | 435 | ||
| 347 | *r++ = 'e'; *r++ = 'e'; | 436 | *r++ = 'e'; |
| 437 | *r++ = 'e'; | ||
| 348 | return r - reply; | 438 | return r - reply; |
| 349 | } | 439 | } |
| 350 | 440 | ||
| 351 | static ot_peerlist dummy_list; | 441 | static ot_peerlist dummy_list; |
| 352 | size_t remove_peer_from_torrent( PROTO_FLAG proto, struct ot_workstruct *ws ) { | 442 | size_t remove_peer_from_torrent(PROTO_FLAG proto, struct ot_workstruct *ws) { |
| 353 | int exactmatch; | 443 | int exactmatch; |
| 354 | ot_vector *torrents_list = mutex_bucket_lock_by_hash( *ws->hash ); | 444 | ot_vector *torrents_list = mutex_bucket_lock_by_hash(*ws->hash); |
| 355 | ot_torrent *torrent = binary_search( ws->hash, torrents_list->data, torrents_list->size, sizeof( ot_torrent ), OT_HASH_COMPARE_SIZE, &exactmatch ); | 445 | ot_torrent *torrent = binary_search(ws->hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch); |
| 356 | ot_peerlist *peer_list = &dummy_list; | 446 | ot_peerlist *peer_list = &dummy_list; |
| 447 | size_t peer_size; /* initialized in next line */ | ||
| 448 | ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size); | ||
| 449 | size_t peer_count = 0, seed_count = 0; | ||
| 357 | 450 | ||
| 358 | #ifdef WANT_SYNC_LIVE | 451 | #ifdef WANT_SYNC_LIVE |
| 359 | if( proto != FLAG_MCA ) { | 452 | if (proto != FLAG_MCA) { |
| 360 | OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_STOPPED; | 453 | OT_PEERFLAG(ws->peer) |= PEER_FLAG_STOPPED; |
| 361 | livesync_tell( ws ); | 454 | livesync_tell(ws); |
| 362 | } | 455 | } |
| 363 | #endif | 456 | #endif |
| 364 | 457 | ||
| 365 | if( exactmatch ) { | 458 | if (exactmatch) { |
| 366 | peer_list = torrent->peer_list; | 459 | peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4; |
| 367 | switch( vector_remove_peer( &peer_list->peers, &ws->peer ) ) { | 460 | switch (vector_remove_peer(&peer_list->peers, peer_src, peer_size)) { |
| 368 | case 2: peer_list->seed_count--; /* Intentional fallthrough */ | 461 | case 2: |
| 369 | case 1: peer_list->peer_count--; /* Intentional fallthrough */ | 462 | peer_list->seed_count--; /* Intentional fallthrough */ |
| 370 | default: break; | 463 | case 1: |
| 464 | peer_list->peer_count--; /* Intentional fallthrough */ | ||
| 465 | default: | ||
| 466 | break; | ||
| 371 | } | 467 | } |
| 468 | |||
| 469 | peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count; | ||
| 470 | seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count; | ||
| 372 | } | 471 | } |
| 373 | 472 | ||
| 374 | if( proto == FLAG_TCP ) { | 473 | if (proto == FLAG_TCP) { |
| 375 | int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; | 474 | int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM; |
| 376 | ws->reply_size = sprintf( ws->reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie" PEERS_BENCODED "0:e", peer_list->seed_count, peer_list->peer_count - peer_list->seed_count, erval, erval / 2 ); | 475 | ws->reply_size = sprintf(ws->reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie%s0:e", seed_count, peer_count - seed_count, erval, |
| 476 | erval / 2, peer_size == OT_PEER_SIZE6 ? PEERS_BENCODED6 : PEERS_BENCODED4); | ||
| 377 | } | 477 | } |
| 378 | 478 | ||
| 379 | /* Handle UDP reply */ | 479 | /* Handle UDP reply */ |
| 380 | if( proto == FLAG_UDP ) { | 480 | if (proto == FLAG_UDP) { |
| 381 | ((uint32_t*)ws->reply)[2] = htonl( OT_CLIENT_REQUEST_INTERVAL_RANDOM ); | 481 | ((uint32_t *)ws->reply)[2] = htonl(OT_CLIENT_REQUEST_INTERVAL_RANDOM); |
| 382 | ((uint32_t*)ws->reply)[3] = htonl( peer_list->peer_count - peer_list->seed_count ); | 482 | ((uint32_t *)ws->reply)[3] = htonl(peer_count - seed_count); |
| 383 | ((uint32_t*)ws->reply)[4] = htonl( peer_list->seed_count); | 483 | ((uint32_t *)ws->reply)[4] = htonl(seed_count); |
| 384 | ws->reply_size = 20; | 484 | ws->reply_size = 20; |
| 385 | } | 485 | } |
| 386 | 486 | ||
| 387 | mutex_bucket_unlock_by_hash( *ws->hash, 0 ); | 487 | mutex_bucket_unlock_by_hash(*ws->hash, 0); |
| 388 | return ws->reply_size; | 488 | return ws->reply_size; |
| 389 | } | 489 | } |
| 390 | 490 | ||
| 391 | void iterate_all_torrents( int (*for_each)( ot_torrent* torrent, uintptr_t data ), uintptr_t data ) { | 491 | void iterate_all_torrents(int (*for_each)(ot_torrent *torrent, uintptr_t data), uintptr_t data) { |
| 392 | int bucket; | 492 | int bucket; |
| 393 | size_t j; | 493 | size_t j; |
| 394 | 494 | ||
| 395 | for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 495 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
| 396 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 496 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
| 397 | ot_torrent *torrents = (ot_torrent*)(torrents_list->data); | 497 | ot_torrent *torrents = (ot_torrent *)(torrents_list->data); |
| 398 | 498 | ||
| 399 | for( j=0; j<torrents_list->size; ++j ) | 499 | for (j = 0; j < torrents_list->size; ++j) |
| 400 | if( for_each( torrents + j, data ) ) | 500 | if (for_each(torrents + j, data)) |
| 401 | break; | 501 | break; |
| 402 | 502 | ||
| 403 | mutex_bucket_unlock( bucket, 0 ); | 503 | mutex_bucket_unlock(bucket, 0); |
| 404 | if( !g_opentracker_running ) return; | 504 | if (!g_opentracker_running) |
| 505 | return; | ||
| 506 | } | ||
| 507 | } | ||
| 508 | |||
| 509 | ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size) { | ||
| 510 | ot_ip6 *ip = (ot_ip6 *)peer; | ||
| 511 | if (!ip6_isv4mapped(ip)) { | ||
| 512 | *peer_size = OT_PEER_SIZE6; | ||
| 513 | return (ot_peer *)peer; | ||
| 514 | } | ||
| 515 | *peer_size = OT_PEER_SIZE4; | ||
| 516 | return (ot_peer *)(((uint8_t *)peer) + 12); | ||
| 517 | } | ||
| 518 | |||
| 519 | size_t peer_size_from_peer6(ot_peer6 *peer) { | ||
| 520 | ot_ip6 *ip = (ot_ip6 *)peer; | ||
| 521 | if (!ip6_isv4mapped(ip)) | ||
| 522 | return OT_PEER_SIZE6; | ||
| 523 | return OT_PEER_SIZE4; | ||
| 524 | } | ||
| 525 | |||
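Both helpers above lean on the fact that internally every peer carries a 16-byte address, and an IPv4 client is stored as the v4-mapped address ::ffff:a.b.c.d: ten zero bytes, two 0xff bytes, then the four IPv4 bytes. That is why peer_from_peer6 can hand back a pointer 12 bytes into the same buffer as the compact IPv4 record. A standalone illustration of that layout (assuming ip6_isv4mapped tests exactly this ::ffff:0:0/96 prefix):

    #include <stdio.h>
    #include <string.h>

    int main(void) {
      unsigned char mapped[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 10, 0, 0, 1}; /* ::ffff:10.0.0.1 */
      static const unsigned char prefix[12] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff};

      if (!memcmp(mapped, prefix, 12)) /* the v4-mapped check ip6_isv4mapped is assumed to perform */
        printf("v4 record starts at offset 12: %u.%u.%u.%u\n", mapped[12], mapped[13], mapped[14], mapped[15]);
      return 0;
    }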
| 526 | #ifdef _DEBUG_RANDOMTORRENTS | ||
| 527 | void trackerlogic_add_random_torrents(size_t amount) { | ||
| 528 | struct ot_workstruct ws; | ||
| 529 | memset(&ws, 0, sizeof(ws)); | ||
| 530 | |||
| 531 | ws.inbuf = malloc(G_INBUF_SIZE); | ||
| 532 | ws.outbuf = malloc(G_OUTBUF_SIZE); | ||
| 533 | ws.reply = ws.outbuf; | ||
| 534 | ws.hash = (ot_hash *)ws.inbuf; | ||
| 535 | |||
| 536 | while (amount--) { | ||
| 537 | arc4random_buf(ws.hash, sizeof(ot_hash)); | ||
| 538 | arc4random_buf(&ws.peer, sizeof(ws.peer)); | ||
| 539 | |||
| 540 | OT_PEERFLAG(ws.peer) &= PEER_FLAG_SEEDING | PEER_FLAG_COMPLETED | PEER_FLAG_STOPPED; | ||
| 541 | |||
| 542 | add_peer_to_torrent_and_return_peers(FLAG_TCP, &ws, 1); | ||
| 405 | } | 543 | } |
| 544 | |||
| 545 | free(ws.inbuf); | ||
| 546 | free(ws.outbuf); | ||
| 406 | } | 547 | } |
| 548 | #endif | ||
| 407 | 549 | ||
| 408 | void exerr( char * message ) { | 550 | void exerr(char *message) { |
| 409 | fprintf( stderr, "%s\n", message ); | 551 | fprintf(stderr, "%s\n", message); |
| 410 | exit( 111 ); | 552 | exit(111); |
| 411 | } | 553 | } |
| 412 | 554 | ||
| 413 | void trackerlogic_init( ) { | 555 | void trackerlogic_init() { |
| 414 | g_tracker_id = random(); | 556 | g_tracker_id = random(); |
| 415 | 557 | ||
| 416 | if( !g_stats_path ) | 558 | if (!g_stats_path) |
| 417 | g_stats_path = "stats"; | 559 | g_stats_path = "stats"; |
| 418 | g_stats_path_len = strlen( g_stats_path ); | 560 | g_stats_path_len = strlen(g_stats_path); |
| 419 | 561 | ||
| 420 | /* Initialise background worker threads */ | 562 | /* Initialise background worker threads */ |
| 421 | mutex_init( ); | 563 | mutex_init(); |
| 422 | clean_init( ); | 564 | clean_init(); |
| 423 | fullscrape_init( ); | 565 | fullscrape_init(); |
| 424 | accesslist_init( ); | 566 | accesslist_init(); |
| 425 | livesync_init( ); | 567 | livesync_init(); |
| 426 | stats_init( ); | 568 | stats_init(); |
| 427 | } | 569 | } |
| 428 | 570 | ||
| 429 | void trackerlogic_deinit( void ) { | 571 | void trackerlogic_deinit(void) { |
| 430 | int bucket, delta_torrentcount = 0; | 572 | int bucket, delta_torrentcount = 0; |
| 431 | size_t j; | 573 | size_t j; |
| 432 | 574 | ||
| 433 | /* Free all torrents... */ | 575 | /* Free all torrents... */ |
| 434 | for(bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) { | 576 | for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) { |
| 435 | ot_vector *torrents_list = mutex_bucket_lock( bucket ); | 577 | ot_vector *torrents_list = mutex_bucket_lock(bucket); |
| 436 | if( torrents_list->size ) { | 578 | if (torrents_list->size) { |
| 437 | for( j=0; j<torrents_list->size; ++j ) { | 579 | for (j = 0; j < torrents_list->size; ++j) { |
| 438 | ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + j; | 580 | ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + j; |
| 439 | free_peerlist( torrent->peer_list ); | 581 | free_peerlist(torrent->peer_list6); |
| 582 | free_peerlist(torrent->peer_list4); | ||
| 440 | delta_torrentcount -= 1; | 583 | delta_torrentcount -= 1; |
| 441 | } | 584 | } |
| 442 | free( torrents_list->data ); | 585 | free(torrents_list->data); |
| 443 | } | 586 | } |
| 444 | mutex_bucket_unlock( bucket, delta_torrentcount ); | 587 | mutex_bucket_unlock(bucket, delta_torrentcount); |
| 445 | } | 588 | } |
| 446 | 589 | ||
| 447 | /* Deinitialise background worker threads */ | 590 | /* Deinitialise background worker threads */ |
| 448 | stats_deinit( ); | 591 | stats_deinit(); |
| 449 | livesync_deinit( ); | 592 | livesync_deinit(); |
| 450 | accesslist_deinit( ); | 593 | accesslist_deinit(); |
| 451 | fullscrape_deinit( ); | 594 | fullscrape_deinit(); |
| 452 | clean_deinit( ); | 595 | clean_deinit(); |
| 453 | /* Release mutexes */ | 596 | /* Release mutexes */ |
| 454 | mutex_deinit( ); | 597 | mutex_deinit(); |
| 455 | } | 598 | } |
| 456 | |||
| 457 | const char *g_version_trackerlogic_c = "$Source$: $Revision$\n"; | ||
diff --git a/trackerlogic.h b/trackerlogic.h index ef59179..022184d 100644 --- a/trackerlogic.h +++ b/trackerlogic.h | |||
| @@ -6,11 +6,11 @@ | |||
| 6 | #ifndef OT_TRACKERLOGIC_H__ | 6 | #ifndef OT_TRACKERLOGIC_H__ |
| 7 | #define OT_TRACKERLOGIC_H__ | 7 | #define OT_TRACKERLOGIC_H__ |
| 8 | 8 | ||
| 9 | #include <sys/types.h> | ||
| 10 | #include <sys/time.h> | ||
| 11 | #include <time.h> | ||
| 12 | #include <stdint.h> | 9 | #include <stdint.h> |
| 13 | #include <stdlib.h> | 10 | #include <stdlib.h> |
| 11 | #include <sys/time.h> | ||
| 12 | #include <sys/types.h> | ||
| 13 | #include <time.h> | ||
| 14 | 14 | ||
| 15 | #if defined(__linux__) && defined(WANT_ARC4RANDOM) | 15 | #if defined(__linux__) && defined(WANT_ARC4RANDOM) |
| 16 | #include <bsd/stdlib.h> | 16 | #include <bsd/stdlib.h> |
| @@ -22,111 +22,133 @@ | |||
| 22 | typedef uint8_t ot_hash[20]; | 22 | typedef uint8_t ot_hash[20]; |
| 23 | typedef time_t ot_time; | 23 | typedef time_t ot_time; |
| 24 | typedef char ot_ip6[16]; | 24 | typedef char ot_ip6[16]; |
| 25 | typedef struct { ot_ip6 address; int bits; } | 25 | typedef struct { |
| 26 | ot_net; | 26 | ot_ip6 address; |
| 27 | #ifdef WANT_V6 | 27 | int bits; |
| 28 | #define OT_IP_SIZE 16 | 28 | } ot_net; |
| 29 | #define PEERS_BENCODED "6:peers6" | 29 | /* List of peers should fit in a single UDP packet (around 1200 bytes) */ |
| 30 | #else | 30 | #define OT_MAX_PEERS_UDP6 66 |
| 31 | #define OT_IP_SIZE 4 | 31 | #define OT_MAX_PEERS_UDP4 200 |
| 32 | #define PEERS_BENCODED "5:peers" | 32 | |
| 33 | #endif | 33 | #define OT_IP_SIZE6 16 |
| 34 | #define OT_IP_SIZE4 4 | ||
| 35 | #define OT_PORT_SIZE 2 | ||
| 36 | #define OT_FLAG_SIZE 1 | ||
| 37 | #define OT_TIME_SIZE 1 | ||
| 34 | 38 | ||
| 35 | /* Some tracker behaviour tunable */ | 39 | /* Some tracker behaviour tunable */ |
| 36 | #define OT_CLIENT_TIMEOUT 30 | 40 | #define OT_CLIENT_TIMEOUT 30 |
| 37 | #define OT_CLIENT_TIMEOUT_CHECKINTERVAL 10 | 41 | #define OT_CLIENT_TIMEOUT_CHECKINTERVAL 10 |
| 38 | #define OT_CLIENT_TIMEOUT_SEND (60*15) | 42 | #define OT_CLIENT_TIMEOUT_SEND (60 * 15) |
| 39 | #define OT_CLIENT_REQUEST_INTERVAL (60*30) | 43 | #define OT_CLIENT_REQUEST_INTERVAL (60 * 30) |
| 40 | #define OT_CLIENT_REQUEST_VARIATION (60*6) | 44 | #define OT_CLIENT_REQUEST_VARIATION (60 * 6) |
| 41 | 45 | ||
| 42 | #define OT_TORRENT_TIMEOUT_HOURS 24 | 46 | #define OT_TORRENT_TIMEOUT_HOURS 24 |
| 43 | #define OT_TORRENT_TIMEOUT (60*OT_TORRENT_TIMEOUT_HOURS) | 47 | #define OT_TORRENT_TIMEOUT (60 * OT_TORRENT_TIMEOUT_HOURS) |
| 44 | 48 | ||
| 45 | #define OT_CLIENT_REQUEST_INTERVAL_RANDOM ( OT_CLIENT_REQUEST_INTERVAL - OT_CLIENT_REQUEST_VARIATION/2 + (int)( nrand48(ws->rand48_state) % OT_CLIENT_REQUEST_VARIATION ) ) | 49 | #define OT_CLIENT_REQUEST_INTERVAL_RANDOM \ |
| 50 | (OT_CLIENT_REQUEST_INTERVAL - OT_CLIENT_REQUEST_VARIATION / 2 + (int)(nrand48(ws->rand48_state) % OT_CLIENT_REQUEST_VARIATION)) | ||
| 46 | 51 | ||
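With the values above, OT_CLIENT_REQUEST_INTERVAL_RANDOM works out to 1800 - 180 + (0..359) seconds, i.e. a re-announce interval drawn uniformly from 1620 to 1979 seconds. Spreading clients across a six-minute window centered on the 30-minute interval keeps them from re-announcing in lockstep.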
| 47 | /* If WANT_MODEST_FULLSCRAPES is on, ip addresses may not | 52 | /* If WANT_MODEST_FULLSCRAPES is on, ip addresses may not |
| 48 | fullscrape more frequently than this amount in seconds */ | 53 | fullscrape more frequently than this amount in seconds */ |
| 49 | #define OT_MODEST_PEER_TIMEOUT (60*5) | 54 | #define OT_MODEST_PEER_TIMEOUT (60 * 5) |
| 50 | 55 | ||
| 51 | /* If peers come back before 10 minutes, don't live sync them */ | 56 | /* If peers come back before 10 minutes, don't live sync them */ |
| 52 | #define OT_CLIENT_SYNC_RENEW_BOUNDARY 10 | 57 | #define OT_CLIENT_SYNC_RENEW_BOUNDARY 10 |
| 53 | 58 | ||
| 54 | /* Number of tracker admin ip addresses allowed */ | 59 | /* Number of tracker admin ip addresses allowed */ |
| 55 | #define OT_ADMINIP_MAX 64 | 60 | #define OT_ADMINIP_MAX 64 |
| 56 | #define OT_MAX_THREADS 64 | 61 | #define OT_MAX_THREADS 64 |
| 57 | 62 | ||
| 58 | #define OT_PEER_TIMEOUT 45 | 63 | /* Number of minutes after announce before peer is removed */ |
| 64 | #define OT_PEER_TIMEOUT 45 | ||
| 59 | 65 | ||
| 60 | /* We maintain a list of 1024 pointers to sorted list of ot_torrent structs | 66 | /* We maintain a list of 1024 pointers to sorted list of ot_torrent structs |
| 61 | Sort key is, of course, its hash */ | 67 | Sort key is, of course, its hash */ |
| 62 | #define OT_BUCKET_COUNT_BITS 10 | 68 | #define OT_BUCKET_COUNT_BITS 10 |
| 69 | |||
| 70 | #define OT_BUCKET_COUNT (1 << OT_BUCKET_COUNT_BITS) | ||
| 71 | #define OT_BUCKET_COUNT_SHIFT (32 - OT_BUCKET_COUNT_BITS) | ||
| 63 | 72 | ||
| 64 | #define OT_BUCKET_COUNT (1<<OT_BUCKET_COUNT_BITS) | 73 | /* if _DEBUG_RANDOMTORRENTS is set, this is the amount of torrents to create |
| 65 | #define OT_BUCKET_COUNT_SHIFT (32-OT_BUCKET_COUNT_BITS) | 74 | on startup */ |
| 75 | #define RANDOMTORRENTS (1024 * 1024 * 1) | ||
| 66 | 76 | ||
| 67 | /* From opentracker.c */ | 77 | /* From opentracker.c */ |
| 68 | extern time_t g_now_seconds; | 78 | extern time_t g_now_seconds; |
| 69 | extern volatile int g_opentracker_running; | 79 | extern volatile int g_opentracker_running; |
| 70 | #define g_now_minutes (g_now_seconds/60) | 80 | #define g_now_minutes (g_now_seconds / 60) |
| 71 | 81 | ||
| 72 | extern uint32_t g_tracker_id; | 82 | extern uint32_t g_tracker_id; |
| 73 | typedef enum { FLAG_TCP, FLAG_UDP, FLAG_MCA, FLAG_SELFPIPE } PROTO_FLAG; | 83 | typedef enum { FLAG_TCP, FLAG_UDP, FLAG_MCA, FLAG_SELFPIPE } PROTO_FLAG; |
| 74 | 84 | ||
| 75 | typedef struct { | 85 | #define OT_PEER_COMPARE_SIZE6 ((OT_IP_SIZE6) + (OT_PORT_SIZE)) |
| 76 | uint8_t data[OT_IP_SIZE+2+2]; | 86 | #define OT_PEER_COMPARE_SIZE4 ((OT_IP_SIZE4) + (OT_PORT_SIZE)) |
| 77 | } ot_peer; | 87 | #define OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(PEER_SIZE) ((PEER_SIZE) - (OT_TIME_SIZE) - (OT_FLAG_SIZE)) |
| 88 | |||
| 89 | #define OT_PEER_SIZE6 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE6)) | ||
| 90 | #define OT_PEER_SIZE4 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE4)) | ||
| 91 | |||
| 92 | typedef uint8_t ot_peer; /* Generic pointer to a v6 or v4 peer */ | ||
| 93 | typedef uint8_t ot_peer6[OT_PEER_SIZE6]; | ||
| 94 | typedef uint8_t ot_peer4[OT_PEER_SIZE4]; | ||
| 78 | static const uint8_t PEER_FLAG_SEEDING = 0x80; | 95 | static const uint8_t PEER_FLAG_SEEDING = 0x80; |
| 79 | static const uint8_t PEER_FLAG_COMPLETED = 0x40; | 96 | static const uint8_t PEER_FLAG_COMPLETED = 0x40; |
| 80 | static const uint8_t PEER_FLAG_STOPPED = 0x20; | 97 | static const uint8_t PEER_FLAG_STOPPED = 0x20; |
| 81 | static const uint8_t PEER_FLAG_FROM_SYNC = 0x10; | 98 | static const uint8_t PEER_FLAG_FROM_SYNC = 0x10; |
| 82 | static const uint8_t PEER_FLAG_LEECHING = 0x00; | 99 | static const uint8_t PEER_FLAG_LEECHING = 0x00; |
| 83 | 100 | ||
| 84 | #ifdef WANT_V6 | 101 | /* Takes an ot_peer6 and returns the proper pointer to the peer and sets peer_size */ |
| 85 | #define OT_SETIP(peer,ip) memcpy((peer),(ip),(OT_IP_SIZE)) | 102 | ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size); |
| 86 | #else | 103 | size_t peer_size_from_peer6(ot_peer6 *peer); |
| 87 | #define OT_SETIP(peer,ip) memcpy((peer),(((uint8_t*)ip)+12),(OT_IP_SIZE)) | ||
| 88 | #endif | ||
| 89 | #define OT_SETPORT(peer,port) memcpy(((uint8_t*)(peer))+(OT_IP_SIZE),(port),2) | ||
| 90 | #define OT_PEERFLAG(peer) (((uint8_t*)(peer))[(OT_IP_SIZE)+2]) | ||
| 91 | #define OT_PEERTIME(peer) (((uint8_t*)(peer))[(OT_IP_SIZE)+3]) | ||
| 92 | 104 | ||
| 93 | #define OT_HASH_COMPARE_SIZE (sizeof(ot_hash)) | 105 | /* New style */ |
| 94 | #define OT_PEER_COMPARE_SIZE ((OT_IP_SIZE)+2) | 106 | #define OT_SETIP(peer, ip) memcpy((uint8_t *)(peer), (ip), OT_IP_SIZE6) |
| 107 | #define OT_SETPORT(peer, port) memcpy(((uint8_t *)(peer)) + (OT_IP_SIZE6), (port), 2) | ||
| 108 | #define OT_PEERFLAG(peer) (((uint8_t *)(peer))[(OT_IP_SIZE6) + 2]) | ||
| 109 | #define OT_PEERFLAG_D(peer, peersize) (((uint8_t *)(peer))[(peersize) - 2]) | ||
| 110 | #define OT_PEERTIME(peer, peersize) (((uint8_t *)(peer))[(peersize) - 1]) | ||
| 111 | |||
| 112 | #define PEERS_BENCODED6 "6:peers6" | ||
| 113 | #define PEERS_BENCODED4 "5:peers" | ||
| 114 | |||
| 115 | #define OT_HASH_COMPARE_SIZE (sizeof(ot_hash)) | ||
| 95 | 116 | ||
| 96 | struct ot_peerlist; | 117 | struct ot_peerlist; |
| 97 | typedef struct ot_peerlist ot_peerlist; | 118 | typedef struct ot_peerlist ot_peerlist; |
| 98 | typedef struct { | 119 | typedef struct { |
| 99 | ot_hash hash; | 120 | ot_hash hash; |
| 100 | ot_peerlist *peer_list; | 121 | ot_peerlist *peer_list6; |
| 122 | ot_peerlist *peer_list4; | ||
| 101 | } ot_torrent; | 123 | } ot_torrent; |
| 102 | 124 | ||
| 103 | #include "ot_vector.h" | 125 | #include "ot_vector.h" |
| 104 | 126 | ||
| 105 | struct ot_peerlist { | 127 | struct ot_peerlist { |
| 106 | ot_time base; | 128 | ot_time base; |
| 107 | size_t seed_count; | 129 | size_t seed_count; |
| 108 | size_t peer_count; | 130 | size_t peer_count; |
| 109 | size_t down_count; | 131 | size_t down_count; |
| 110 | /* normal peers vector or | 132 | /* normal peers vector or |
| 111 | pointer to ot_vector[32] buckets if data != NULL and space == 0 | 133 | pointer to ot_vector[32] buckets if data != NULL and space == 0 |
| 112 | */ | 134 | */ |
| 113 | ot_vector peers; | 135 | ot_vector peers; |
| 114 | }; | 136 | }; |
| 115 | #define OT_PEERLIST_HASBUCKETS(peer_list) ((peer_list)->peers.size > (peer_list)->peers.space) | 137 | #define OT_PEERLIST_HASBUCKETS(peer_list) ((peer_list)->peers.size > (peer_list)->peers.space) |
| 116 | 138 | ||
| 117 | struct ot_workstruct { | 139 | struct ot_workstruct { |
| 118 | /* Thread specific, static */ | 140 | /* Thread specific, static */ |
| 119 | char *inbuf; | 141 | char *inbuf; |
| 120 | #define G_INBUF_SIZE 8192 | 142 | #define G_INBUF_SIZE 8192 |
| 121 | char *outbuf; | 143 | char *outbuf; |
| 122 | #define G_OUTBUF_SIZE 8192 | 144 | #define G_OUTBUF_SIZE 8192 |
| 123 | #ifdef _DEBUG_HTTPERROR | 145 | #ifdef _DEBUG_HTTPERROR |
| 124 | char *debugbuf; | 146 | char *debugbuf; |
| 125 | #define G_DEBUGBUF_SIZE 8192 | 147 | #define G_DEBUGBUF_SIZE 8192 |
| 126 | #endif | 148 | #endif |
| 127 | 149 | ||
| 128 | /* The peer currently in the working */ | 150 | /* The peer currently in the working */ |
| 129 | ot_peer peer; | 151 | ot_peer6 peer; /* Can fit v6 and v4 peers */ |
| 130 | 152 | ||
| 131 | /* Pointers into the request buffer */ | 153 | /* Pointers into the request buffer */ |
| 132 | ot_hash *hash; | 154 | ot_hash *hash; |
| @@ -155,31 +177,34 @@ struct ot_workstruct { | |||
| 155 | #endif | 177 | #endif |
| 156 | 178 | ||
| 157 | #ifdef WANT_SYNC | 179 | #ifdef WANT_SYNC |
| 158 | #define WANT_SYNC_PARAM( param ) , param | 180 | #define WANT_SYNC_PARAM(param) , param |
| 159 | #else | 181 | #else |
| 160 | #define WANT_SYNC_PARAM( param ) | 182 | #define WANT_SYNC_PARAM(param) |
| 161 | #endif | 183 | #endif |
| 162 | 184 | ||
| 163 | #ifdef WANT_LOG_NETWORKS | 185 | #ifdef WANT_LOG_NETWORKS |
| 164 | #error Live logging networks disabled at the moment. | 186 | #error Live logging networks disabled at the moment. |
| 165 | #endif | 187 | #endif |
| 166 | 188 | ||
| 167 | void trackerlogic_init( void ); | 189 | void trackerlogic_init(void); |
| 168 | void trackerlogic_deinit( void ); | 190 | void trackerlogic_deinit(void); |
| 169 | void exerr( char * message ); | 191 | void exerr(char *message); |
| 170 | 192 | ||
| 171 | /* add_peer_to_torrent does only release the torrent bucket if from_sync is set, | 193 | /* add_peer_to_torrent does only release the torrent bucket if from_sync is set, |
| 172 | otherwise it is released in return_peers_for_torrent */ | 194 | otherwise it is released in return_peers_for_torrent */ |
| 173 | size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount ); | 195 | size_t add_peer_to_torrent_and_return_peers(PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount); |
| 174 | size_t remove_peer_from_torrent( PROTO_FLAG proto, struct ot_workstruct *ws ); | 196 | size_t remove_peer_from_torrent(PROTO_FLAG proto, struct ot_workstruct *ws); |
| 175 | size_t return_tcp_scrape_for_torrent( ot_hash *hash, int amount, char *reply ); | 197 | size_t return_tcp_scrape_for_torrent(ot_hash const *hash_list, int amount, char *reply); |
| 176 | size_t return_udp_scrape_for_torrent( ot_hash hash, char *reply ); | 198 | size_t return_udp_scrape_for_torrent(ot_hash const hash, char *reply); |
| 177 | void add_torrent_from_saved_state( ot_hash hash, ot_time base, size_t down_count ); | 199 | void add_torrent_from_saved_state(ot_hash const hash, ot_time base, size_t down_count); |
| 200 | #ifdef _DEBUG_RANDOMTORRENTS | ||
| 201 | void trackerlogic_add_random_torrents(size_t amount); | ||
| 202 | #endif | ||
| 178 | 203 | ||
| 179 | /* torrent iterator */ | 204 | /* torrent iterator */ |
| 180 | void iterate_all_torrents( int (*for_each)( ot_torrent* torrent, uintptr_t data ), uintptr_t data ); | 205 | void iterate_all_torrents(int (*for_each)(ot_torrent *torrent, uintptr_t data), uintptr_t data); |
| 181 | 206 | ||
| 182 | /* Helper, before it moves to its own object */ | 207 | /* Helper, before it moves to its own object */ |
| 183 | void free_peerlist( ot_peerlist *peer_list ); | 208 | void free_peerlist(ot_peerlist *peer_list); |
| 184 | 209 | ||
| 185 | #endif | 210 | #endif |
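A few notes on the new header follow, with small standalone sketches; none of this code is part of the commit itself.

The new OT_MAX_PEERS_UDP6 / OT_MAX_PEERS_UDP4 limits follow from the "around 1200 bytes" UDP budget noted in the comment: a compact v6 peer entry on the wire is 16 address bytes plus 2 port bytes, a v4 entry 4 plus 2, which is exactly what OT_PEER_COMPARE_SIZE6 and OT_PEER_COMPARE_SIZE4 evaluate to. A compile-time sanity check (constants mirrored from the diff above; the 1200-byte figure is the comment's estimate, not a protocol constant):

/* 66 v6 entries and 200 v4 entries both stay within ~1200 bytes of payload */
#define OT_MAX_PEERS_UDP6     66
#define OT_MAX_PEERS_UDP4     200
#define OT_PEER_COMPARE_SIZE6 (16 + 2) /* address + port */
#define OT_PEER_COMPARE_SIZE4 (4 + 2)

_Static_assert(OT_MAX_PEERS_UDP6 * OT_PEER_COMPARE_SIZE6 == 1188, "66 * 18 bytes");
_Static_assert(OT_MAX_PEERS_UDP4 * OT_PEER_COMPARE_SIZE4 == 1200, "200 * 6 bytes");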
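OT_CLIENT_REQUEST_INTERVAL_RANDOM simply centres the jitter around the base interval: 1800 s minus half of the 360 s variation, plus a value in [0, 360), so announces land between 1620 s and 1979 s. A standalone sketch of the same expression (the seed below is made up; in the tracker the state lives in ws->rand48_state):

#include <stdio.h>
#include <stdlib.h>

#define OT_CLIENT_REQUEST_INTERVAL  (60 * 30) /* 1800 s */
#define OT_CLIENT_REQUEST_VARIATION (60 * 6)  /*  360 s */

int main(void) {
  unsigned short rand48_state[3] = {0x1234, 0x5678, 0x9abc}; /* hypothetical seed */
  int interval = OT_CLIENT_REQUEST_INTERVAL - OT_CLIENT_REQUEST_VARIATION / 2 +
                 (int)(nrand48(rand48_state) % OT_CLIENT_REQUEST_VARIATION);
  printf("next announce in %d s\n", interval); /* always within 1620..1979 */
  return 0;
}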
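OT_BUCKET_COUNT_SHIFT exists so that the top OT_BUCKET_COUNT_BITS bits of an info_hash select one of the 1024 torrent buckets. The actual helper lives elsewhere in the tree; the sketch below only illustrates the arithmetic and is an assumption about how the shift is meant to be used:

#include <stdint.h>

#define OT_BUCKET_COUNT_BITS  10
#define OT_BUCKET_COUNT       (1 << OT_BUCKET_COUNT_BITS)
#define OT_BUCKET_COUNT_SHIFT (32 - OT_BUCKET_COUNT_BITS)

typedef uint8_t ot_hash[20];

static unsigned bucket_from_hash(const ot_hash hash) {
  /* big-endian read of the first four hash bytes, keep the top 10 bits */
  uint32_t key = ((uint32_t)hash[0] << 24) | ((uint32_t)hash[1] << 16) |
                 ((uint32_t)hash[2] << 8)  |  (uint32_t)hash[3];
  return key >> OT_BUCKET_COUNT_SHIFT; /* 0 .. OT_BUCKET_COUNT - 1 */
}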
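The headline change is the peer layout: the WANT_V6-dependent ot_peer struct is gone, and a peer is now a flat byte array, [address][port][flags][time], in either the 20-byte v6 or the 8-byte v4 flavour, with the _D accessor variants taking the peer size explicitly. A minimal sketch that packs one v6-sized peer using the macros as defined in the hunk above (the address, port and flag values are made up):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define OT_IP_SIZE6   16
#define OT_PORT_SIZE  2
#define OT_FLAG_SIZE  1
#define OT_TIME_SIZE  1
#define OT_PEER_COMPARE_SIZE6 ((OT_IP_SIZE6) + (OT_PORT_SIZE))
#define OT_PEER_SIZE6 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE6))

typedef uint8_t ot_peer6[OT_PEER_SIZE6];

#define OT_SETIP(peer, ip)            memcpy((uint8_t *)(peer), (ip), OT_IP_SIZE6)
#define OT_SETPORT(peer, port)        memcpy(((uint8_t *)(peer)) + (OT_IP_SIZE6), (port), 2)
#define OT_PEERFLAG_D(peer, peersize) (((uint8_t *)(peer))[(peersize) - 2])
#define OT_PEERTIME(peer, peersize)   (((uint8_t *)(peer))[(peersize) - 1])

static const uint8_t PEER_FLAG_SEEDING = 0x80;

int main(void) {
  uint8_t ip[16]  = {0};              /* ::1                      */
  uint8_t port[2] = {0x1a, 0xe1};     /* 6881, network byte order */
  ot_peer6 peer;

  ip[15] = 1;
  memset(peer, 0, sizeof(peer));
  OT_SETIP(peer, ip);                                       /* bytes  0..15 */
  OT_SETPORT(peer, port);                                   /* bytes 16..17 */
  OT_PEERFLAG_D(peer, OT_PEER_SIZE6) |= PEER_FLAG_SEEDING;  /* byte 18      */
  OT_PEERTIME(peer, OT_PEER_SIZE6)    = 0;                  /* byte 19      */

  printf("v6 peer is %d bytes, seeding: %s\n", (int)OT_PEER_SIZE6,
         OT_PEERFLAG_D(peer, OT_PEER_SIZE6) & PEER_FLAG_SEEDING ? "yes" : "no");
  return 0;
}

The v4 case is the same picture with a 4-byte address, which is why OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE and the _D accessors take the peer size as a parameter instead of hard-coding offsets.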
