Compare commits

...

93 Commits

Author SHA1 Message Date
Nicolas Arenas cbd5409077 refs #2310 Add post script 2025-06-25 12:30:28 +02:00
Nicolas Arenas 6b8350c86f refs #2310 Add service to package 2025-06-25 12:15:06 +02:00
Nicolas Arenas 8f32947383 refs: #2310 create package 2025-06-25 07:30:58 +02:00
Dirk Engling b20b0b8926 Do not ignore return value of iob_init_autofree 2025-04-25 23:35:04 +02:00
Dirk Engling 123e8aa1b9 Silence warning about using a pointer after realloc, take offset of match before 2025-04-25 21:49:53 +02:00
Dirk Engling c854b3db9b address linter warnings. Thanks to gagath@debian.org 2024-06-22 14:39:10 +02:00
Dirk Engling 59c1c30107 Initialise top leechers vector 2024-05-10 16:31:42 +02:00
Dirk Engling 37f5b2403b Add top100 torrents by leechers to the list 2024-05-10 15:41:52 +02:00
Dirk Engling 2c88c7b65a Disable forced gzip by default 2024-04-21 15:52:30 +02:00
Dirk Engling 8b11bc6653 Group feature options that need additional compiler flags in Makefiles, so they can be enable together 2024-04-21 15:36:36 +02:00
Dirk Engling e8d385f176 Move _GNU_SOURCE declaration further to the top, so recursive inclusion of stdio does not ignore the macro 2024-04-21 14:58:42 +02:00
Dirk Engling 492da4a57e Better point out what options can be given multiple times 2024-04-18 18:07:35 +02:00
Dirk Engling 9a4710c2a4 add example for option that may be given more than once 2024-04-18 18:05:29 +02:00
Dirk Engling e5e1a54da3 further fixes to the man page 2024-04-18 18:03:58 +02:00
Dirk Engling f010861f61 Add missing config option 2024-04-18 17:56:48 +02:00
Dirk Engling 22fbcf5647 Spelling in sample config 2024-04-18 17:54:27 +02:00
Dirk Engling 398c5fe1b6 Add man page for the config file 2024-04-18 17:52:18 +02:00
Dirk Engling b01f1a0723 Some fixes to the man page 2024-04-18 17:39:51 +02:00
Dirk Engling dcc47f9612 Add first draft of a man page 2024-04-18 16:40:29 +02:00
Dirk Engling 33bd2c9094 Add support for zstd 2024-04-18 14:54:34 +02:00
Dirk Engling 160ba08074 Only have huge downloads (aka chunked) be application/octet-stream 2024-04-18 14:50:51 +02:00
Dirk Engling b56e648b5e Let our fullscrapes have a binary content-type 2024-04-17 21:34:20 +02:00
Dirk Engling ff03fd7449 chrome does not accept chunked gzip encoding for http/1.0 protocol version 2024-04-17 15:03:14 +02:00
Dirk Engling d3985b00b5 Remove CVS version tags, they did not work anymore after moving to git 2024-04-16 16:18:53 +02:00
Dirk Engling 8fd8a54c4e Install default signal handlers before spawning first thread 2024-04-15 23:08:16 +02:00
Dirk Engling 62807ad205 deflatePending ist not available everywhere. Just treat the (very rare) case of some data being left over like all other reallocatables 2024-04-15 17:58:34 +02:00
Dirk Engling 806a6b99cf time_caching_worker never returns, but if it would, it should return a value 2024-04-15 17:34:06 +02:00
Dirk Engling a6c4766348 define _GNU_SOURCE to silence warning about asprintf 2024-04-15 16:44:07 +02:00
Dirk Engling a4161f911f Use bzero instead of the new iob_init for older libowfats 2024-04-15 16:38:37 +02:00
Dirk Engling e0cd3b1831 fix: git version string is not yet available 2024-04-15 15:26:15 +02:00
Dirk Engling 6cdebf31ac Remove warning 2024-04-15 15:20:34 +02:00
Dirk Engling f8637baaeb Allow the use of iob_init if the new libowfat is not yet available 2024-04-15 15:20:21 +02:00
Dirk Engling b1606fd37e clang-format 2024-04-15 00:44:16 +02:00
Dirk Engling 7a48a69345 clang-format 2024-04-15 00:41:43 +02:00
Dirk Engling 7c633c259e clang-format 2024-04-15 00:39:02 +02:00
Dirk Engling 4c5935c057 Formatter description 2024-04-14 22:43:20 +02:00
Dirk Engling 7428c12e5d Spelling 2024-04-14 17:34:03 +02:00
Dirk Engling 464038a091 print when RANDOMTORRENTS were created (fix) 2024-04-14 17:33:24 +02:00
Dirk Engling 4dcb75a736 print when RANDOMTORRENTS were created 2024-04-14 17:32:52 +02:00
Dirk Engling 0ce6c42aaa print when RANDOMTORRENTS were created 2024-04-14 17:32:18 +02:00
Dirk Engling 04e0eca0a0 Make the amount of random torrents a tunable 2024-04-14 17:31:16 +02:00
Dirk Engling cab821f253 Silence warning in debug code 2024-04-14 13:14:00 +02:00
Dirk Engling 236c9292f6 gzip fullscrapes by default now 2024-04-14 13:13:47 +02:00
Dirk Engling 4ff25fc9c6 Revert b8ee3dfec1 2024-04-14 13:12:27 +02:00
Dirk Engling 6ae819ae10 remove debug fprintfs used to catch a bug 2024-04-14 13:12:11 +02:00
Dirk Engling b8ee3dfec1 Use the HELPLINE macro for help lines 2024-04-14 13:09:41 +02:00
Dirk Engling fceffdefba Call io_dontwantread only once on a socket 2024-04-14 05:12:42 +02:00
Dirk Engling f4a389da3f Merge branch 'chunked-transfers' of erdgeist.org:opentracker into chunked-transfers 2024-04-13 19:22:42 +02:00
Dirk Engling 0e9cc66de2 Cancel chunked transfers on dead sockets, too 2024-04-13 19:22:36 +02:00
Dirk Engling 79f5e272aa white space 2024-04-13 16:56:51 +02:00
Dirk Engling 57f859728d Initialise io_batches with their appropiate init function 2024-04-13 16:54:44 +02:00
Dirk Engling a3d27dff8c Remove unused macro 2024-04-13 16:54:20 +02:00
Dirk Engling 638ca0f56c Use asprintf to allocate header instead of fixed array 2024-04-13 16:53:29 +02:00
Dirk Engling 3a2a711a29 Fix type warnings in debug strings 2024-04-13 16:52:46 +02:00
Dirk Engling bd4992435c Add proper parsing for the gzip content encoding 2024-04-13 14:04:18 +02:00
Dirk Engling 35f55290f3 Make chunked transfers use gzip also 2024-04-13 12:51:04 +02:00
Dirk Engling 2eeae0a65a Allow opentracker to start with a random set of torrents for debugging purposes 2024-04-13 12:26:08 +02:00
Dirk Engling 3a6d99dd46 Better track current iobatch 2024-04-13 02:54:08 +02:00
Dirk Engling 1a70d9f9ef First shot on chunked transfers 2024-04-13 00:47:29 +02:00
Dirk Engling 301faeb10c Start an extra thread to update timer cache. Using signals was unnecessary 2024-04-12 18:10:31 +02:00
Dirk Engling 52d9829f81 Reserve enough memory to hold all peer timeout buckets 2024-04-10 01:13:59 +02:00
Dirk Engling 1b976863fc white space 2024-04-10 00:25:45 +02:00
Dirk Engling ffa7b81690 Only query torrents if we actually found one 2024-04-09 02:50:45 +02:00
Dirk Engling 18a746b89d report full peer and seed count for both address families 2024-04-07 21:29:36 +02:00
Dirk Engling aedd7e30cb Silence warning on older compilers 2024-04-07 00:37:38 +02:00
Dirk Engling 18554498c2 Whitespace fix 2024-04-07 00:06:54 +02:00
Dirk Engling b7b84bdec4 Number of reported removed peers was too high, correct it by size of peer 2024-04-07 00:06:27 +02:00
Dirk Engling db28465e0c if copying seeders from behind, use compare_size to advance pointer 2024-04-06 21:35:15 +02:00
Dirk Engling d9a5f04675 Return peer from both address families on announce 2024-04-06 19:21:03 +02:00
Dirk Engling 880d5145a0 Avoid incompatible pointer type warning 2024-04-06 00:55:24 +02:00
Dirk Engling 29784f1f41 Add stddef header 2024-04-06 00:29:45 +02:00
Dirk Engling b4d948696d Improve example for proxy network addresses 2024-04-05 22:52:18 +02:00
Dirk Engling 9000f5d67a minor cleanups. Breaks proxy.c 2024-04-05 22:44:30 +02:00
Dirk Engling 524d78d6c7 constify 2024-04-05 16:30:02 +02:00
Dirk Engling 64e25b681c Ignore object files 2024-04-05 03:30:00 +02:00
Dirk Engling 83a0a108e0 Merge branch 'dualstack-opentracker' of erdgeist.org:opentracker into dualstack-opentracker 2024-04-05 03:27:17 +02:00
Dirk Engling a09609d94e Enable live syncing v6 peers 2024-04-05 03:26:42 +02:00
Dirk Engling 8ccf4b43d7 gcc is no longer default cc 2024-04-04 18:00:50 +02:00
Dirk Engling 308e91a2fa -pthread needs to be in CFLAGS 2024-04-04 17:56:35 +02:00
Dirk Engling 9f615bbebf -pthread is the only necessary linker option for pthread API 2024-04-04 17:55:27 +02:00
Dirk Engling 959e0912a1 Bind dual stack by default 2024-04-03 23:08:17 +02:00
Dirk Engling 7c3279a028 Allow binding to v4 and v6 addresses always 2024-04-03 22:50:01 +02:00
Dirk Engling 2afc4893bf Prepare opentracker for dual stack capabilities 2024-04-03 22:25:30 +02:00
Dirk Engling eb8834f778 Move further towards dual stack capable opentracker, name more constants 2024-04-01 23:00:34 +02:00
Dirk Engling 9275eb3f82 Clean up pointer to task object 2024-04-01 00:52:34 +02:00
Dirk Engling 73e839f5ff Tidy up v6 handler 2024-04-01 00:48:09 +02:00
Dirk Engling aca3ee0ac8 Prevent proxied ips of the wrong flavour to poison our clients 2024-03-31 13:36:26 +02:00
Dirk Engling 5b98dcf3a3 Limit ipv6 udp replies to an amount that does not create too large UDP packets. Credits to anonymous donor 2024-03-30 00:34:28 +01:00
Dirk Engling a3251ffac7 mask bits to be checked in ot_net 2024-03-29 03:58:23 +01:00
Dirk Engling 5805fe5f86 Dont take address of address 2024-03-29 03:44:31 +01:00
Dirk Engling 543ab73017 Allow networks to be used instead of ip addresses when blessing is involved 2024-03-29 03:30:13 +01:00
Dirk Engling ede702c7ff Add docs about reverse proxies 2024-03-10 14:45:31 +01:00
Dirk Engling 6604d65779 Some clients have added the /stats URL as /announce URL and cause a lot of expensive operations. Prevent that 2024-03-09 23:58:41 +01:00
72 changed files with 4931 additions and 3019 deletions

246
.clang-format 100644
View File

@ -0,0 +1,246 @@
---
Language: Cpp
# BasedOnStyle: LLVM
AccessModifierOffset: -2
AlignAfterOpenBracket: Align
AlignArrayOfStructures: None
AlignConsecutiveAssignments:
Enabled: true
AcrossEmptyLines: true
AcrossComments: true
AlignCompound: true
AlignFunctionPointers: false
PadOperators: true
AlignConsecutiveBitFields:
Enabled: false
AcrossEmptyLines: true
AcrossComments: true
AlignCompound: false
AlignFunctionPointers: false
PadOperators: false
AlignConsecutiveDeclarations:
Enabled: true
AcrossEmptyLines: true
AcrossComments: true
AlignCompound: true
AlignFunctionPointers: false
PadOperators: true
AlignConsecutiveMacros:
Enabled: true
AcrossEmptyLines: true
AcrossComments: true
AlignCompound: true
AlignFunctionPointers: false
PadOperators: false
AlignConsecutiveShortCaseStatements:
Enabled: true
AcrossEmptyLines: true
AcrossComments: true
AlignCaseColons: false
AlignEscapedNewlines: Right
AlignOperands: Align
AlignTrailingComments:
Kind: Always
OverEmptyLines: 0
AllowAllArgumentsOnNextLine: true
AllowAllParametersOfDeclarationOnNextLine: true
AllowBreakBeforeNoexceptSpecifier: Never
AllowShortBlocksOnASingleLine: Never
AllowShortCaseLabelsOnASingleLine: false
AllowShortCompoundRequirementOnASingleLine: true
AllowShortEnumsOnASingleLine: true
AllowShortFunctionsOnASingleLine: All
AllowShortIfStatementsOnASingleLine: Never
AllowShortLambdasOnASingleLine: All
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: MultiLine
AttributeMacros:
- __capability
BinPackArguments: true
BinPackParameters: true
BitFieldColonSpacing: Both
BraceWrapping:
AfterCaseLabel: false
AfterClass: false
AfterControlStatement: Never
AfterEnum: false
AfterExternBlock: false
AfterFunction: false
AfterNamespace: false
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
BeforeCatch: false
BeforeElse: false
BeforeLambdaBody: false
BeforeWhile: false
IndentBraces: false
SplitEmptyFunction: true
SplitEmptyRecord: true
SplitEmptyNamespace: true
BreakAdjacentStringLiterals: true
BreakAfterAttributes: Leave
BreakAfterJavaFieldAnnotations: false
BreakArrays: true
BreakBeforeBinaryOperators: None
BreakBeforeConceptDeclarations: Always
BreakBeforeBraces: Attach
BreakBeforeInlineASMColon: OnlyMultiline
BreakBeforeTernaryOperators: true
BreakConstructorInitializers: BeforeColon
BreakInheritanceList: BeforeColon
BreakStringLiterals: false
ColumnLimit: 160
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
DisableFormat: false
EmptyLineAfterAccessModifier: Never
EmptyLineBeforeAccessModifier: LogicalBlock
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: true
ForEachMacros:
- foreach
- Q_FOREACH
- BOOST_FOREACH
IfMacros:
- KJ_IF_MAYBE
IncludeBlocks: Preserve
IncludeCategories:
- Regex: '^"(llvm|llvm-c|clang|clang-c)/'
Priority: 2
SortPriority: 0
CaseSensitive: false
- Regex: '^(<|"(gtest|gmock|isl|json)/)'
Priority: 3
SortPriority: 0
CaseSensitive: false
- Regex: '.*'
Priority: 1
SortPriority: 0
CaseSensitive: false
IncludeIsMainRegex: '(Test)?$'
IncludeIsMainSourceRegex: ''
IndentAccessModifiers: false
IndentCaseBlocks: false
IndentCaseLabels: false
IndentExternBlock: AfterExternBlock
IndentGotoLabels: true
IndentPPDirectives: None
IndentRequiresClause: true
IndentWidth: 2
IndentWrappedFunctionNames: false
InsertBraces: false
InsertNewlineAtEOF: false
InsertTrailingCommas: None
IntegerLiteralSeparator:
Binary: 0
BinaryMinDigits: 0
Decimal: 0
DecimalMinDigits: 0
Hex: 0
HexMinDigits: 0
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: true
KeepEmptyLinesAtEOF: false
LambdaBodyIndentation: Signature
LineEnding: DeriveLF
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Auto
ObjCBlockIndentWidth: 2
ObjCBreakBeforeNestedBlockParam: true
ObjCSpaceAfterProperty: false
ObjCSpaceBeforeProtocolList: true
PackConstructorInitializers: BinPack
PenaltyBreakAssignment: 2
PenaltyBreakBeforeFirstCallParameter: 19
PenaltyBreakComment: 300
PenaltyBreakFirstLessLess: 120
PenaltyBreakOpenParenthesis: 0
PenaltyBreakScopeResolution: 500
PenaltyBreakString: 1000
PenaltyBreakTemplateDeclaration: 10
PenaltyExcessCharacter: 1000000
PenaltyIndentedWhitespace: 0
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
PPIndentWidth: -1
QualifierAlignment: Leave
ReferenceAlignment: Pointer
ReflowComments: true
RemoveBracesLLVM: false
RemoveParentheses: Leave
RemoveSemicolon: false
RequiresClausePosition: OwnLine
RequiresExpressionIndentation: OuterScope
SeparateDefinitionBlocks: Leave
ShortNamespaceLines: 1
SkipMacroDefinitionBody: false
SortIncludes: CaseSensitive
SortJavaStaticImport: Before
SortUsingDeclarations: LexicographicNumeric
SpaceAfterCStyleCast: false
SpaceAfterLogicalNot: false
SpaceAfterTemplateKeyword: true
SpaceAroundPointerQualifiers: Default
SpaceBeforeAssignmentOperators: true
SpaceBeforeCaseColon: false
SpaceBeforeCpp11BracedList: false
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeJsonColon: false
SpaceBeforeParens: ControlStatements
SpaceBeforeParensOptions:
AfterControlStatements: true
AfterForeachMacros: true
AfterFunctionDefinitionName: false
AfterFunctionDeclarationName: false
AfterIfMacros: true
AfterOverloadedOperator: false
AfterPlacementOperator: true
AfterRequiresInClause: false
AfterRequiresInExpression: false
BeforeNonEmptyParentheses: false
SpaceBeforeRangeBasedForLoopColon: true
SpaceBeforeSquareBrackets: false
SpaceInEmptyBlock: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: Never
SpacesInContainerLiterals: true
SpacesInLineCommentPrefix:
Minimum: 1
Maximum: -1
SpacesInParens: Never
SpacesInParensOptions:
InCStyleCasts: false
InConditionalStatements: false
InEmptyParentheses: false
Other: false
SpacesInSquareBrackets: false
Standard: Latest
StatementAttributeLikeMacros:
- Q_EMIT
StatementMacros:
- Q_UNUSED
- QT_REQUIRE_VERSION
TabWidth: 8
UseTab: Never
VerilogBreakBetweenInstancePorts: true
WhitespaceSensitiveMacros:
- BOOST_PP_STRINGIZE
- CF_SWIFT_NAME
- NS_SWIFT_NAME
- PP_STRINGIZE
- STRINGIZE
...

2
.gitignore vendored 100644
View File

@ -0,0 +1,2 @@
*.o

View File

@ -1,7 +1,5 @@
# $Id$
CC?=gcc
# Linux flavour
# PREFIX?=/opt/diet
# LIBOWFAT_HEADERS=$(PREFIX)/include
@ -20,16 +18,24 @@ LIBOWFAT_LIBRARY=$(PREFIX)/libowfat
BINDIR?=$(PREFIX)/bin
STRIP?=strip
#FEATURES+=-DWANT_V6
#FEATURES+=-DWANT_V4_ONLY
#FEATURES+=-DWANT_ACCESSLIST_BLACK
#FEATURES+=-DWANT_ACCESSLIST_WHITE
#FEATURES+=-DWANT_DYNAMIC_ACCESSLIST
#FEATURES+=-DWANT_SYNC_LIVE
#FEATURES+=-DWANT_IP_FROM_QUERY_STRING
#FEATURES+=-DWANT_COMPRESSION_GZIP
#FEATURES+=-DWANT_COMPRESSION_GZIP_ALWAYS
# If you want gzip support to be compiled in, uncomment the next include.
# You can further modify the behaviour by setting DWANT_COMPRESSION_GZIP_ALWAYS
# in Makefile.gzip
include Makefile.gzip
# If you want zstd support to be compiled in, uncomment the next include.
# You can further modify the behaviour by setting DWANT_COMPRESSION_ZSTD_ALWAYS
# in Makefile.zstd
#include Makefile.zstd
#FEATURES+=-DWANT_LOG_NETWORKS
#FEATURES+=-DWANT_RESTRICT_STATS
#FEATURES+=-DWANT_IP_FROM_PROXY
@ -41,18 +47,24 @@ STRIP?=strip
#FEATURES+=-DWANT_DEV_RANDOM
FEATURES+=-DWANT_FULLSCRAPE
# You need libowfat version 0.34 to allow for automatic release of chunks during
# full scrape transfer, if you rely on an older version, enable this flag
#FEATURES+=-DWANT_NO_AUTO_FREE
# Is enabled on BSD systems by default in trackerlogic.h
# on Linux systems you will need -lbsd
#FEATURES+=-DWANT_ARC4RANDOM
# on Linux systems the include Makefile adds -lbsd
#include Makefile.arc4random
#FEATURES+=-D_DEBUG_HTTPERROR
#FEATURES+=-D_DEBUG_RANDOMTORRENTS
GIT_VERSION=$(shell sh -c 'command -v git >/dev/null && test -d .git && git rev-parse HEAD || echo _git_or_commit_not_found_')
OPTS_debug=-D_DEBUG -g -ggdb # -pg -fprofile-arcs -ftest-coverage
OPTS_production=-O3
CFLAGS+=-I$(LIBOWFAT_HEADERS) -Wall -pipe -Wextra #-ansi -pedantic
LDFLAGS+=-L$(LIBOWFAT_LIBRARY) -lowfat -pthread -lpthread -lz
#LDFLAGS+=-lbsd
CFLAGS+=-I$(LIBOWFAT_HEADERS) -DGIT_VERSION=$(GIT_VERSION) -Wall -pipe -pthread -Wextra #-ansi -pedantic
LDFLAGS+=-L$(LIBOWFAT_LIBRARY) -lowfat -pthread
BINARY =opentracker
HEADERS=trackerlogic.h scan_urlencoded_query.h ot_mutex.h ot_stats.h ot_vector.h ot_clean.h ot_udp.h ot_iovec.h ot_fullscrape.h ot_accesslist.h ot_http.h ot_livesync.h ot_rijndael.h

View File

@ -0,0 +1,3 @@
FEATURES+=-DWANT_ARC4RANDOM
LDFLAGS+=-lbsd

4
Makefile.gzip 100644
View File

@ -0,0 +1,4 @@
FEATURES+=-DWANT_COMPRESSION_GZIP
#FEATURES+=-DWANT_COMPRESSION_GZIP_ALWAYS
LDFLAGS+=-lz

3
Makefile.zstd 100644
View File

@ -0,0 +1,3 @@
FEATURES+=-DWANT_COMPRESSION_ZSTD
#FEATURES+=-DWANT_COMPRESSION_ZSTD_ALWAYS
LDFLAGS+=-lzstd

8
debian/NEWS vendored 100644
View File

@ -0,0 +1,8 @@
opentracker (0.0~git20210823.110868e-8) unstable; urgency=medium
This package now ships a different /etc/opentracker/opentracker-ipv6.conf
from the previously common /etc/opentracker/opentracker.conf. This is
because the IPv6 version breaks if an IPv4 listen IP is configured. See
bug #1053721.
-- Agathe Porte <gagath@debian.org> Sun, 10 Dec 2023 13:44:53 +0100

10
debian/README.debian vendored 100644
View File

@ -0,0 +1,10 @@
By default, the tracker is run in "private" mode using a whitelist access.
You need to explicitly list all of the torrent hashes that you want to announce
into the /etc/opentracker/whitelist.txt file.
If you want to switch to "public" mode, allowing to announce any torrent hash,
you must recompile the package. You need to comment the
FEATURES+=-DWANT_ACCESSLIST_WHITE option and uncomment the
FEATURES+=-DWANT_ACCESSLIST_BLACK option in the debian/rules file.
Switching to "public" mode is not recommended by the package maintainer.

6
debian/changelog vendored 100644
View File

@ -0,0 +1,6 @@
opengnsys-opentracker (1.0.0) unstable; urgency=medium
* Rename package to avoid conflicts with ubuntu packages
* Build without whitelist support
-- Nicolas Arenas <narenas@qindel.com> Tue, 24 Jun 2025 22:09:00 +0100

2
debian/clean vendored 100644
View File

@ -0,0 +1,2 @@
opengnsys-opentracker-ipv4/
opengnsys-opentracker-ipv6/

58
debian/control vendored 100644
View File

@ -0,0 +1,58 @@
Source: opengnsys-opentracker
Section: net
Priority: optional
Maintainer: Nicolas Arenas <narenas@qindel.com>
Build-Depends:
debhelper-compat (= 13),
libowfat-dev,
zlib1g-dev,
Standards-Version: 4.6.2
Homepage:
Vcs-Browser: https://ognproject.evlt.uma.es/gitea/narenas/opengnsys-opentracker.git
Vcs-Git: https://ognproject.evlt.uma.es/gitea/narenas/opengnsys-opentracker.git
Rules-Requires-Root: no
Package: opengnsys-opentracker-common
Architecture: all
Pre-Depends: ${misc:Pre-Depends}
Depends:
${misc:Depends},
Description: Open and free bittorrent tracker (common files)
opentracker is an open and free bittorrent tracker project. It aims for minimal
resource usage and is intended to run on your wlan router.
.
This package contains the common files for both IPv4 and IPv6 versions.
Breaks:
opentracker
Replaces:
opentracker
Package: opengnsys-opentracker
Architecture: any
Pre-Depends: ${misc:Pre-Depends}
Depends:
${misc:Depends},
${shlibs:Depends},
opengnsys-opentracker-common,
Description: Open and free bittorrent tracker (IPv4)
opentracker is an open and free bittorrent tracker project. It aims for minimal
resource usage and is intended to run on your wlan router.
.
This package contains the IPv4 version.
Breaks:
opentracker-common,
Replaces:
opentracker-common,
Package: opengnsys-opentracker-ipv6
Architecture: any
Pre-Depends: ${misc:Pre-Depends}
Depends:
${misc:Depends},
${shlibs:Depends},
opengnsys-opentracker-common,
Description: Open and free bittorrent tracker (IPv6)
opentracker is an open and free bittorrent tracker project. It aims for minimal
resource usage and is intended to run on your wlan router.
.
This package contains the IPv6 version.

33
debian/copyright vendored 100644
View File

@ -0,0 +1,33 @@
Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
Upstream-Name: opentracker
Upstream-Contact: Dirk Engling <erdgeist@erdgeist.org>
Source: https://erdgeist.org/arts/software/opentracker/
Files: *
Copyright: Dirk Engling <erdgeist@erdgeist.org>
License: Beerware
"THE BEER-WARE LICENSE" (Revision 42):
As long as you retain this notice you can do whatever you want with this
stuff. If we meet some day, and you think this stuff is worth it, you can buy
me a beer in return.
Files: debian/*
Copyright:
2015 Kaliko Jack <kaliko@azylum.org>
2022-2024 Agathe Porte <gagath@debian.org>
License: GPL-2+
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>
.
On Debian systems, the complete text of the GNU General
Public License version 2 can be found in "/usr/share/common-licenses/GPL-2".

3
debian/dirs vendored 100644
View File

@ -0,0 +1,3 @@
etc/opentracker
usr/bin
var/lib/opentracker

3
debian/gbp.conf vendored 100644
View File

@ -0,0 +1,3 @@
[DEFAULT]
upstream-branch=upstream/latest
debian-branch=debian/latest

View File

@ -0,0 +1,2 @@
README
README_v6

View File

@ -0,0 +1 @@
debian/whitelist.txt etc/opentracker/

View File

@ -0,0 +1 @@
usr/share/man/man8/opentracker.8 usr/share/man/man8/opentracker-ipv6.8

View File

@ -0,0 +1 @@
debian/opentracker.1

View File

@ -0,0 +1,2 @@
opengnsys-opentracker-ipv6/opentracker-ipv6 usr/bin
debian/opentracker-ipv6.conf etc/opentracker

View File

@ -0,0 +1,3 @@
opengnsys-opentracker-ipv4/opentracker usr/bin
debian/opentracker.conf etc/opentracker/
debian/opentracker.service lib/systemd/system/

View File

@ -0,0 +1,10 @@
#!/bin/sh
# Debian maintainer script (postinst): enable the opentracker service
# once the package has been configured.
set -e

if [ "$1" = "configure" ]; then
    # daemon-reload (not daemon-reexec) is the correct call to make a
    # running systemd pick up the newly installed unit file; reexec
    # re-executes the manager itself and is unnecessary here.
    # All calls are best-effort (|| true) so configuration still
    # succeeds where systemd is absent, e.g. in chroots or containers.
    systemctl daemon-reload || true
    systemctl preset opentracker.service >/dev/null 2>&1 || true
    systemctl enable opentracker.service >/dev/null 2>&1 || true
fi

exit 0

View File

@ -0,0 +1,9 @@
#!/bin/sh
# Debian maintainer script (postrm): disable the opentracker service when
# the package is removed ($1 is "remove"; upgrades pass other arguments
# and intentionally leave the service enabled).
set -e
if [ "$1" = "remove" ]; then
# Best-effort (|| true): removal must succeed even where systemd is not
# running, e.g. inside chroots or containers.
systemctl disable opentracker.service >/dev/null 2>&1 || true
systemctl daemon-reload || true
fi
exit 0

106
debian/opentracker-ipv6.conf vendored 100644
View File

@ -0,0 +1,106 @@
# opentracker config file
#
# I) Address opentracker will listen on, using both, tcp AND udp family
# (note, that port 6969 is implicite if ommitted).
#
# If no listen option is given (here or on the command line), opentracker
# listens on 0.0.0.0:6969 tcp and udp.
#
# The next variable determines if udp sockets are handled in the event
# loop (set it to 0, the default) or are handled in blocking reads in
# dedicated worker threads. You have to set this value before the
# listen.tcp_udp or listen.udp statements before it takes effect, but you
# can re-set it for each listen statement. Normally you should keep it at
# the top of the config file.
#
# listen.udp.workers 4
#
# listen.tcp_udp 0.0.0.0
# listen.tcp_udp 192.168.0.1:80
# listen.tcp_udp 10.0.0.5:6969
#
# To only listen on tcp or udp family ports, list them this way:
#
# listen.tcp 0.0.0.0
# listen.udp 192.168.0.1:6969
#
# Note, that using 0.0.0.0 for udp sockets may yield surprising results.
# An answer packet sent on that socket will not necessarily have the
# source address that the requesting client may expect, but any address
# on that interface.
#
# II) If opentracker runs in a non-open mode, point it to files containing
# all torrent hashes that it will serve (shell option -w)
#
access.whitelist /etc/opentracker/whitelist.txt
#
# or, if opentracker was compiled to allow blacklisting (shell option -b)
#
# access.blacklist ./blacklist
#
# It is pointless and hence not possible to compile black AND white
# listing, so choose one of those options at compile time. File format
# is straight forward: "<hex info hash>\n<hex info hash>\n..."
#
# If you do not want to grant anyone access to your stats, enable the
# WANT_RESTRICT_STATS option in Makefile and bless the ip addresses
# allowed to fetch stats here.
#
# access.stats 192.168.0.23
#
# There is another way of hiding your stats. You can obfuscate the path
# to them. Normally it is located at /stats but you can configure it to
# appear anywhere on your tracker.
#
# access.stats_path stats
# III) Live sync uses udp multicast packets to keep a cluster of opentrackers
# synchronized. This option tells opentracker which port to listen for
# incoming live sync packets. The ip address tells opentracker, on which
# interface to join the multicast group, those packets will arrive.
# (shell option -i 192.168.0.1 -s 9696), port 9696 is default.
#
# livesync.cluster.listen 192.168.0.1:9696
#
# Note that two udp sockets will be opened. One on ip address 0.0.0.0
# port 9696, that will join the multicast group 224.0.42.23 for incoming
# udp packets and one on ip address 192.168.0.1 port 9696 for outgoing
# udp packets.
#
# As of now one and only one ip address must be given, if opentracker
# was built with the WANT_SYNC_LIVE feature.
#
# IV) Sync between trackers running in a cluster is restricted to packets
# coming from trusted ip addresses. While source ip verification is far
# from perfect, the authors of opentracker trust in the correct
# application of tunnels, filters and LAN setups (shell option -A).
#
# livesync.cluster.node_ip 192.168.0.4
# livesync.cluster.node_ip 192.168.0.5
# livesync.cluster.node_ip 192.168.0.6
#
# This is the admin ip address for old style (HTTP based) asynchronous
# tracker syncing.
#
# batchsync.cluster.admin_ip 10.1.1.1
#
# V) Control privilege drop behaviour.
# Put in the directory opentracker will chroot/chdir to. All black/white
# list files must be put in that directory (shell option -d).
#
#
tracker.rootdir /etc/opentracker
#
# Tell opentracker which user to setuid to.
#
tracker.user _opentracker
#
# VI) opentracker can be told to answer to a "GET / HTTP"-request with a
# redirect to another location (shell option -r).
#
# tracker.redirect_url https://your.tracker.local/

14
debian/opentracker-ipv6.service vendored 100644
View File

@ -0,0 +1,14 @@
[Unit]
Description=OpenTracker Daemon (IPv6)
After=network.target
Documentation=man:opentracker
[Service]
User=_opentracker
PrivateTmp=yes
ExecStart=/usr/bin/opentracker-ipv6 -f /etc/opentracker/opentracker-ipv6.conf
ExecStop=/bin/kill -INT $MAINPID
ExecReload=/bin/kill -HUP $MAINPID
[Install]
WantedBy=multi-user.target

73
debian/opentracker.1 vendored 100644
View File

@ -0,0 +1,73 @@
.TH OPENTRACKER "8" "July 2021" "opentracker" "User Commands"
.SH NAME
opentracker \- Open bittorrent tracker
.SH SYNOPSIS
.PP
\fBopentracker\fR
[ -\fBi\fR \fIip\fR ]
[ -\fBp\fR \fIport\fR ]
[ -\fBP\fR \fIport\fR ]
[ -\fBr\fR \fIredirect\fR ]
[ -\fBd\fR \fIdir\fR ]
[ -\fBu\fR \fIuser\fR ]
[ -\fBA\fR \fIip\fR ]
[ -\fBf\fR \fIconfig\fR ]
[ -\fBs\fR \fIlivesyncport\fR ]
[ -\fBw\fR \fIwhitelistfile\fR ]
.SH DESCRIPTION
.PP
This tracker is open in a sense that everyone announcing a torrent is welcome
to do so and will be informed about anyone else announcing the same torrent.
Only source IPs are accepted. The tracker implements a minimal
set of essential features only but was able to respond to far more than 10000
requests per second on a Sun Fire 2200 M2 (that's where we found no more clients
able to fire more of our testsuite.sh script).
.SH OPTIONS
.TP
-\fBf\fR \fIconfig\fR
include and execute the config file
.TP
-\fBi\fR \fIip\fR
specify ip to bind to (default: *, you may specify more than one)
.TP
-\fBp\fR \fIport\fR
specify tcp port to bind to (default: 6969, you may specify more than one)
.TP
-\fBP\fR \fIport\fR
specify udp port to bind to (default: 6969, you may specify more than one)
.TP
-\fBr\fR \fIredirecturl\fR
specify url where / should be redirected to (default none)
.TP
-\fBd\fR \fIdir\fR
specify directory to try to chroot to (default: ".")
.TP
-\fBu\fR \fIuser\fR
specify user under whose privileges opentracker should run (default: "nobody")
.TP
-\fBA\fR \fIip\fR
bless an ip address as admin address (e.g. to allow syncs from this address)
.TP
-\fBw\fR \fIfile\fR
specify whitelist file
\fB\-h, \-?, \-\-help\fP
show a help message and exit
.TP
\fB\-ht, \-\-host\fI HOST\fP
host (default: localhost)
.TP
\fB\-p, \-\-port\fI PORT\fP
port (default: 8080)
.TP
\fB\-mp, \-\-multiprocessor\fI PROCESS_COUNT\fP
define how many processes for the grammar checker
.TP
\fB\-t, \-\-test_page\fP
page to test the server on /
.TP
\fB\-on, \-\-opt-on\fR [\fIOPT\fR]...\fP
activate options
.TP
\fB\-off, \-\-opt-off\fR [\fIOPT\fR]...\fP
deactivate options

106
debian/opentracker.conf vendored 100644
View File

@ -0,0 +1,106 @@
# opentracker config file
#
# I) Address opentracker will listen on, using both, tcp AND udp family
# (note, that port 6969 is implicit if omitted).
#
# If no listen option is given (here or on the command line), opentracker
# listens on 0.0.0.0:6969 tcp and udp.
#
# The next variable determines if udp sockets are handled in the event
# loop (set it to 0, the default) or are handled in blocking reads in
# dedicated worker threads. You have to set this value before the
# listen.tcp_udp or listen.udp statements before it takes effect, but you
# can re-set it for each listen statement. Normally you should keep it at
# the top of the config file.
#
# listen.udp.workers 4
#
# listen.tcp_udp 0.0.0.0
# listen.tcp_udp 192.168.0.1:80
# listen.tcp_udp 10.0.0.5:6969
#
# To only listen on tcp or udp family ports, list them this way:
#
# listen.tcp 0.0.0.0
# listen.udp 192.168.0.1:6969
#
# Note, that using 0.0.0.0 for udp sockets may yield surprising results.
# An answer packet sent on that socket will not necessarily have the
# source address that the requesting client may expect, but any address
# on that interface.
#
# II) If opentracker runs in a non-open mode, point it to files containing
# all torrent hashes that it will serve (shell option -w)
#
# access.whitelist /etc/opentracker/whitelist.txt
#
# or, if opentracker was compiled to allow blacklisting (shell option -b)
#
# access.blacklist ./blacklist
#
# It is pointless and hence not possible to compile black AND white
# listing, so choose one of those options at compile time. File format
# is straight forward: "<hex info hash>\n<hex info hash>\n..."
#
# If you do not want to grant anyone access to your stats, enable the
# WANT_RESTRICT_STATS option in Makefile and bless the ip addresses
# allowed to fetch stats here.
#
# access.stats 192.168.0.23
#
# There is another way of hiding your stats. You can obfuscate the path
# to them. Normally it is located at /stats but you can configure it to
# appear anywhere on your tracker.
#
# access.stats_path stats
# III) Live sync uses udp multicast packets to keep a cluster of opentrackers
# synchronized. This option tells opentracker which port to listen for
# incoming live sync packets. The ip address tells opentracker, on which
# interface to join the multicast group, those packets will arrive.
# (shell option -i 192.168.0.1 -s 9696), port 9696 is default.
#
# livesync.cluster.listen 192.168.0.1:9696
#
# Note that two udp sockets will be opened. One on ip address 0.0.0.0
# port 9696, that will join the multicast group 224.0.42.23 for incoming
# udp packets and one on ip address 192.168.0.1 port 9696 for outgoing
# udp packets.
#
# As of now one and only one ip address must be given, if opentracker
# was built with the WANT_SYNC_LIVE feature.
#
# IV) Sync between trackers running in a cluster is restricted to packets
# coming from trusted ip addresses. While source ip verification is far
# from perfect, the authors of opentracker trust in the correct
# application of tunnels, filters and LAN setups (shell option -A).
#
# livesync.cluster.node_ip 192.168.0.4
# livesync.cluster.node_ip 192.168.0.5
# livesync.cluster.node_ip 192.168.0.6
#
# This is the admin ip address for old style (HTTP based) asynchronous
# tracker syncing.
#
# batchsync.cluster.admin_ip 10.1.1.1
#
# V) Control privilege drop behaviour.
# Put in the directory opentracker will chroot/chdir to. All black/white
# list files must be put in that directory (shell option -d).
#
#
tracker.rootdir /etc/opentracker
#
# Tell opentracker which user to setuid to.
#
tracker.user _opentracker
#
# VI) opentracker can be told to answer to a "GET / HTTP"-request with a
# redirect to another location (shell option -r).
#
# tracker.redirect_url https://your.tracker.local/

14
debian/opentracker.service vendored 100644
View File

@ -0,0 +1,14 @@
[Unit]
Description=OpenTracker Daemon (IPv4)
After=network.target
Documentation=man:opentracker
[Service]
User=_opentracker
PrivateTmp=yes
ExecStart=/usr/bin/opentracker -f /etc/opentracker/opentracker.conf
ExecStop=/bin/kill -INT $MAINPID
ExecReload=/bin/kill -HUP $MAINPID
[Install]
WantedBy=multi-user.target

View File

@ -0,0 +1,25 @@
From: Agathe Porte <gagath@debian.org>
Date: Tue, 24 May 2022 14:36:47 +0200
Subject: testsuite: send 4 requests instead of infinity
For Debian autopkgtest needs, we want to send a definite amount of
requests instead of an infinite amount. 4 requests should be enough, as
response time can be quite long.
Forwarded: not-needed
---
tests/testsuite.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/tests/testsuite.sh b/tests/testsuite.sh
index dace2c6..fe480f5 100644
--- a/tests/testsuite.sh
+++ b/tests/testsuite.sh
@@ -1,6 +1,6 @@
#!/bin/sh
-while true; do
+for _ in $(seq 4); do
request_string="GET /announce?info_hash=0123456789012345678\
%$(printf %02X $(( $RANDOM & 0xf )) )\
&ip=$(( $RANDOM & 0xf )).$(( $RANDOM & 0xf )).13.16&port=$(( $RANDOM & 0xff )) HTTP/1.0\n"

1
debian/patches/series vendored 100644
View File

@ -0,0 +1 @@
0001-testsuite-send-4-requests-instead-of-infinity.patch

34
debian/postinst vendored 100644
View File

@ -0,0 +1,34 @@
#!/bin/sh
# Debian maintainer script: on package configuration, create the
# _opentracker system user and register ownership/permissions for its
# home directory.
set -e

HOMEDIR=/var/lib/opentracker

case "$1" in
    configure)
        # Create the dedicated system user on first installation only.
        if ! getent passwd _opentracker > /dev/null 2>&1
        then
            useradd --system --user-group \
                --home-dir "$HOMEDIR" \
                --no-create-home \
                --shell /bin/false \
                --comment "Opentracker bittorrent tracker daemon" \
                _opentracker
        fi
        # Register mode/ownership of the home directory unless the local
        # admin already set an override via dpkg-statoverride.
        if ! dpkg-statoverride --list "$HOMEDIR" >/dev/null 2>&1; then
            dpkg-statoverride --update --add _opentracker _opentracker 0750 "$HOMEDIR"
        fi
    ;;
    abort-upgrade|abort-remove|abort-deconfigure)
    ;;
    *)
        echo "postinst called with unknown argument \`${1}'" >&2
        exit 1
    ;;
esac

#DEBHELPER#

exit 0

65
debian/rules vendored 100755
View File

@ -0,0 +1,65 @@
#!/usr/bin/make -f
# See debhelper(7) (uncomment to enable)
# output every command that modifies files on the build system.
#DH_VERBOSE = 1
export DEB_BUILD_MAINT_OPTIONS += hardening=+all
# see EXAMPLES in dpkg-buildflags(1) and read /usr/share/dpkg/*
DPKG_EXPORT_BUILDFLAGS = 1
include /usr/share/dpkg/buildflags.mk
CFLAGS+=$(CPPFLAGS)
CXXFLAGS+=$(CPPFLAGS)
# see ENVIRONMENT in dpkg-buildflags(1)
# package maintainers to append CFLAGS
export DEB_CFLAGS_MAINT_APPEND += -Wall -pedantic
#FEATURES+=-DWANT_ACCESSLIST_BLACK
#FEATURES+=-DWANT_ACCESSLIST_WHITE
#FEATURES+=-DWANT_IP_FROM_QUERY_STRING
FEATURES+=-DWANT_COMPRESSION_GZIP
#FEATURES+=-DWANT_IP_FROM_PROXY
#FEATURES+=-DWANT_LOG_NUMWANT -DWANT_MODEST_FULLSCRAPES
#FEATURES+=-DWANT_SPOT_WOODPECKER
FEATURES+=-DWANT_SYSLOGS
#FEATURES+=-DWANT_DEV_RANDOM
FEATURES+=-DWANT_RESTRICT_STATS
FEATURES+=-DWANT_NO_AUTO_FREE
export FEATURES
export LIBOWFAT_LIBRARY=/usr/lib
export PREFIX=/usr
# main packaging script based on dh7 syntax
%:
dh $@
LINKEDFILES = ../*.h ../*.c ../Makefile ../Makefile.*
# create a shadow directory for setting different build options.
# inspired from the vim source package
opengnsys-opentracker-%:
mkdir -p "$@"
cd "$@" && ln -s $(LINKEDFILES) .
override_dh_auto_clean:
dh_auto_clean
rm -rf opengnsys-opentracker-ipv4 opengnsys-opentracker-ipv6
override_dh_auto_build: opengnsys-opentracker-ipv4 opengnsys-opentracker-ipv6
STRIP=true FEATURES="$(FEATURES)" dh_auto_build --sourcedirectory=opengnsys-opentracker-ipv4 -- LIBOWFAT_HEADERS=/usr/include/libowfat
STRIP=true FEATURES="$(FEATURES) -DWANT_V6" dh_auto_build --sourcedirectory=opengnsys-opentracker-ipv6 -- LIBOWFAT_HEADERS=/usr/include/libowfat
mv opengnsys-opentracker-ipv6/opentracker opengnsys-opentracker-ipv6/opentracker-ipv6
dpkg-parsechangelog -l debian/NEWS
override_dh_auto_install:
# do not call the install target of the upstream makefile, because
# executables are renamed
dh_auto_install -S none --sourcedirectory=opengnsys-opentracker-ipv4
dh_auto_install -S none --sourcedirectory=opengnsys-opentracker-ipv6
rm -rf debian/opengnsys-opentracker-common/usr/bin

4
debian/salsa-ci.yml vendored 100644
View File

@ -0,0 +1,4 @@
include:
- https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/salsa-ci.yml
- https://salsa.debian.org/salsa-ci-team/pipeline/raw/master/pipeline-jobs.yml

1
debian/source/format vendored 100644
View File

@ -0,0 +1 @@
3.0 (quilt)

View File

@ -0,0 +1,2 @@
# upstream has no known bug tracker
upstream-metadata-missing-bug-tracking

4
debian/tests/control vendored 100644
View File

@ -0,0 +1,4 @@
Tests: testsuite
Depends:
@,
netcat-openbsd,

8
debian/tests/testsuite vendored 100755
View File

@ -0,0 +1,8 @@
#!/bin/bash
# Debian autopkgtest driver: copy the upstream test suite into the
# autopkgtest scratch directory and run it there.
set -euo pipefail

# AUTOPKGTEST_TMP is supplied by the autopkgtest runner; quote the
# expansions so a path containing spaces does not break the copy or cd.
cp -a tests "$AUTOPKGTEST_TMP"
cd "$AUTOPKGTEST_TMP"

bash ./tests/testsuite.sh

2
debian/upstream/metadata vendored 100644
View File

@ -0,0 +1,2 @@
Repository: https://erdgeist.org/gitweb/opentracker
Repository-Browse: https://erdgeist.org/gitweb/opentracker/

5
debian/watch vendored 100644
View File

@ -0,0 +1,5 @@
version=4
opts="mode=git, gitmode=full, pgpmode=none, pretty=0.0~git%cd.%h, repack, compression=xz" \
https://erdgeist.org/gitweb/opentracker \
HEAD

8
debian/whitelist.txt vendored 100644
View File

@ -0,0 +1,8 @@
# By default the tracker is run in "private" mode, using this whitelist.
#
# Add one hash per torrent you want to allow your tracker to announce.
# Example:
# 285862f84278e5e08f3a4f6606d2e768e594d5c3
#
# To switch to "public" mode, allowing to announce any torrent hash, comment
# the "access.whitelist" option in /etc/opentracker/opentracker.conf

130
man1/opentracker.1 100644
View File

@ -0,0 +1,130 @@
.Dd April 15, 2024
.Dt opentracker 1
.Os Unix
.Sh opentracker
.Nm opentracker
.Nd a free and open bittorrent tracker
.Sh SYNOPSIS
.Nm
.Op Fl f Ar config
.Op Fl i Ar ip-select
.Op Fl p Ar port-bind-tcp
.Op Fl P Ar port-bind-udp
.Op Fl A Ar blessed-ip
.Op Fl r Ar redirect-url
.Op Fl d Ar chdir
.Op Fl u Ar user
.Op Fl w| Fl b accesslist
.Sh DESCRIPTION
.Nm
is a bittorrent tracker that implements announce and scrape actions over the
UDP and the plain http protocol, aiming for minimal resource usage.
.Pp
When invoked without parameters, it binds to TCP and UDP port 6969 on all
interfaces. The recommended way to configure opentracker is by providing a
config file using the
.Op Fl f Ar config
option. See
.Xr opentracker.conf 4
for details.
.Pp
.Sh OPTIONS
The following options are available:
.Bl -tag -width -indent=8
.It Fl f Ar config
Parse a config file with a list of options. Consecutive command options
will override options from the config file. See
.Xr opentracker.conf 4
for details.
.It Fl i Ar ip-select
Select an ip address that will be used with the next
.Op Fl p
or
.Op Fl P
command to actually bind to this address. Setting this option without any bind
options in the config file or
.Op Fl p
or
.Op Fl P
commands will limit opentracker to only bind to this address.
.It Fl p Ar port-bind-tcp
Bind to the TCP port on the last preceding ip address set with the
.Op Fl i ip-select
option or to all available addresses if none has been set. Can be given multiple
times.
.It Fl P Ar port-bind-udp
Bind to the UDP port on the last preceding ip address set with the
.Op Fl i ip-select
option or to all available addresses if none has been set. Can be given multiple
times.
.It Fl A Ar blessed-ip
Set an ip address in IPv4 or IPv6 or a net in CIDR notation to bless the network
for access to restricted resources.
.It Fl r Ar redirect-url
Set the URL that
.Nm
will redirect users to when the / address is requested via HTTP.
.It Fl d Ar chdir
Sets the directory
.Nm
will
.Xr chroot 2
to if ran as root or
.Xr chdir 2
to if ran as unprivileged user. Note that any accesslist files need to be
relative to and within that directory.
.It Fl u Ar user
User to run
.Nm
under after all operations that need privileges have finished.
.It Fl w Ar accesslist | Fl b Ar accesslist
If
.Nm
has been compiled with the
.B WANT_ACCESSLIST_BLACK
or
.B WANT_ACCESSLIST_WHITE
options, this option sets the location of the accesslist.
.El
.Sh EXAMPLES
Start
.Nm
bound on UDP and TCP ports 6969 on IPv6 localhost.
.Dl # ./opentracker -i ::1 -p 6969 -P 6969
.Pp
Start
.Nm
bound on UDP port 6868 and TCP port 6868 on IPv4 localhost and allow
privileged access from the network 192.168/16 while redirecting
HTTP clients accessing the root directory, which is not covered by the
bittorrent tracker protocol, to https://my-trackersite.com/.
.Dl # ./opentracker -i 192.168.0.4 -p 6868 -P 6868 -A 192.168/16 -r https://my-trackersite.com/
The announce URLs are http://192.168.0.4:6868/announce and
udp://192.168.0.4:6868/announce respectively.
.Sh FILES
.Bl -tag -width indent
.It Pa opentracker.conf
The
.Nm
config file.
.El
.Sh SEE ALSO
.Xr opentracker.conf 4
.Pp
opentracker documentation
.Lk https://erdgeist.org/arts/software/opentracker
.Pp
Bittorrent tracker protocol
.Lk http://www.bittorrent.org/beps/bep_0015.html
.Sh AUTHOR
.An Dirk Engling
.Aq Mt erdgeist@erdgeist.org .
.Sh LICENSE
This software is released under the Beerware License:
.Pp
Permission is hereby granted, free of charge, to any person obtaining a copy of this software
and associated documentation files (the "Software"), to deal in the Software with the following
terms and conditions:
.Pp
If you meet the author(s) someday, and you think this software is worth it, you can buy them
a beer in return.

View File

@ -0,0 +1,86 @@
.Dd 2024-04-18
.Dt opentracker.conf 5
.Os Unix
.Sh NAME
.Nm opentracker.conf
.Nd configuration file for opentracker
.Sh SYNOPSIS
.Nm
.Sh DESCRIPTION
The
.Nm
configuration file specifies various options for configuring the behavior of the opentracker program.
.Pp
Lines starting with '#' are comments and are ignored. Options are specified as 'keyword value' pairs.
.Pp
The following options are available:
.Pp
.Bl -tag -width ".It access.proxy" -compact
.It listen.tcp_udp Ar address
Specifies an address opentracker will listen on for both TCP and UDP connections. If none are specified, opentracker listens on 0.0.0.0:6969 by default. Can be added more than once.
.Pp
.It listen.tcp Ar address
Specifies the address opentracker will listen on for TCP connections. Can be added more than once.
.Pp
.It listen.udp Ar address
Specifies the address opentracker will listen on for UDP connections. Can be added more than once.
.Pp
.It listen.udp.workers Ar threads
Specifies how many dedicated worker threads will be spawned to handle UDP connections. Defaults to 0, meaning UDP packets are handled in the event loop instead.
.Pp
.It access.whitelist Ar path/to/whitelist
Specifies the path to the whitelist file containing all torrent hashes that opentracker will serve. Use this option if opentracker runs in a non-open mode.
.Pp
.It access.blacklist Ar path/to/blacklist
Specifies the path to the blacklist file containing all torrent hashes that opentracker will not serve. Use this option if opentracker was compiled to allow blacklisting.
.Pp
.It access.fifo_add Ar path/to/adder.fifo
Specifies the path to the FIFO (named pipe) used for dynamic changesets to accesslists. Info hashes written to this FIFO will be added to the main accesslist file.
.Pp
.It access.fifo_delete Ar path/to/deleter.fifo
Specifies the path to the FIFO (named pipe) used for dynamic changesets to accesslists. Info hashes written to this FIFO will be removed from the main accesslist file.
.Pp
.It access.stats Ar ip_address_or_network
Specifies the IP address or network in CIDR notation allowed to fetch stats from opentracker.
.Pp
.It access.stats_path Ar path
Specifies the path to the stats location. You can configure opentracker to appear anywhere on your tracker. Defaults to /stats.
.Pp
.It access.proxy Ar ip_address_or_network
Specifies the IP address or network of the reverse proxies. Opentracker will take the X-Forwarded-For address instead of the source IP address. Can be added more than once.
.Pp
.It livesync.cluster.listen Ar ip_address:port
Specifies the IP address and port opentracker will listen on for incoming live sync packets to keep a cluster of opentrackers synchronized.
.Pp
.It livesync.cluster.node_ip Ar ip_address
Specifies one trusted IP address for sync between trackers running in a cluster. Can be added more than once.
.Pp
.It batchsync.cluster.admin_ip Ar ip_address
Specifies the admin IP address for old-style (HTTP-based) asynchronous tracker syncing.
.Pp
.It tracker.rootdir Ar path
Specifies the directory opentracker will chroot/chdir to. All black/white list files must be located in this directory.
.Pp
.It tracker.user Ar username
Specifies the user opentracker will setuid to after binding to potentially privileged ports.
.Pp
.It tracker.redirect_url Ar URL
Specifies the URL opentracker will redirect to in response to a "GET / HTTP" request.
.El
.Sh EXAMPLES
To specify the address opentracker will listen on for both TCP and UDP connections:
.Dl listen.tcp_udp 0.0.0.0:6969
.Pp
To specify the address opentracker will listen on for TCP connections:
.Dl listen.tcp 0.0.0.0
.Pp
To specify the address opentracker will listen on for UDP connections:
.Dl listen.udp 0.0.0.0:6969
.Pp
.Sh SEE ALSO
.Xr opentracker 1
.Pp
.Sh AUTHOR
.An Dirk Engling
.Aq Mt erdgeist@erdgeist.org
.Pp

File diff suppressed because it is too large Load Diff

View File

@ -2,7 +2,7 @@
#
# I) Address opentracker will listen on, using both, tcp AND udp family
# (note, that port 6969 is implicite if ommitted).
# (note, that port 6969 is implicit if omitted).
#
# If no listen option is given (here or on the command line), opentracker
# listens on 0.0.0.0:6969 tcp and udp.
@ -83,15 +83,26 @@
# IIb)
# If you do not want to grant anyone access to your stats, enable the
# WANT_RESTRICT_STATS option in Makefile and bless the ip addresses
# allowed to fetch stats here.
# or network allowed to fetch stats here.
#
# access.stats 192.168.0.23
# access.stats 10.1.1.23
#
# There is another way of hiding your stats. You can obfuscate the path
# to them. Normally it is located at /stats but you can configure it to
# appear anywhere on your tracker.
#
# access.stats_path stats
#
# II
# If opentracker lives behind one or multiple reverse proxies,
# every http connection appears to come from these proxies. In order to
# take the X-Forwarded-For address instead, compile opentracker with the
# WANT_IP_FROM_PROXY option and set your proxy addresses or networks here.
#
# access.proxy 10.0.1.23
# access.proxy 192.0.0.0/8
#
# III) Live sync uses udp multicast packets to keep a cluster of opentrackers
# synchronized. This option tells opentracker which port to listen for

View File

@ -5,34 +5,35 @@
/* System */
#include <pthread.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <signal.h>
#include <unistd.h>
#ifdef WANT_DYNAMIC_ACCESSLIST
#include <sys/types.h>
#include <sys/stat.h>
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#endif
/* Libowfat */
#include "byte.h"
#include "scan.h"
#include "fmt.h"
#include "ip6.h"
#include "mmap.h"
#include "scan.h"
/* Opentracker */
#include "trackerlogic.h"
#include "ot_accesslist.h"
#include "ot_vector.h"
#include "trackerlogic.h"
/* GLOBAL VARIABLES */
#ifdef WANT_ACCESSLIST
char *g_accesslist_filename = NULL;
char *g_accesslist_filename = NULL;
#ifdef WANT_DYNAMIC_ACCESSLIST
char *g_accesslist_pipe_add = NULL;
char *g_accesslist_pipe_delete = NULL;
char *g_accesslist_pipe_add = NULL;
char *g_accesslist_pipe_delete = NULL;
#endif
static pthread_mutex_t g_accesslist_mutex;
@ -54,20 +55,18 @@ struct ot_accesslist {
ot_time base;
ot_accesslist *next;
};
static ot_accesslist * _Atomic g_accesslist = NULL;
static ot_accesslist *_Atomic g_accesslist = NULL;
#ifdef WANT_DYNAMIC_ACCESSLIST
static ot_accesslist * _Atomic g_accesslist_add = NULL;
static ot_accesslist * _Atomic g_accesslist_delete = NULL;
static ot_accesslist *_Atomic g_accesslist_add = NULL;
static ot_accesslist *_Atomic g_accesslist_delete = NULL;
#endif
/* Helpers to work on access lists */
static int vector_compare_hash(const void *hash1, const void *hash2 ) {
return memcmp( hash1, hash2, OT_HASH_COMPARE_SIZE );
}
static int vector_compare_hash(const void *hash1, const void *hash2) { return memcmp(hash1, hash2, OT_HASH_COMPARE_SIZE); }
static ot_accesslist * accesslist_free(ot_accesslist *accesslist) {
static ot_accesslist *accesslist_free(ot_accesslist *accesslist) {
while (accesslist) {
ot_accesslist * this_accesslist = accesslist;
ot_accesslist *this_accesslist = accesslist;
accesslist = this_accesslist->next;
free(this_accesslist->list);
free(this_accesslist);
@ -75,8 +74,8 @@ static ot_accesslist * accesslist_free(ot_accesslist *accesslist) {
return NULL;
}
static ot_accesslist * accesslist_make(ot_accesslist *next, size_t size) {
ot_accesslist * accesslist_new = malloc(sizeof(ot_accesslist));
static ot_accesslist *accesslist_make(ot_accesslist *next, size_t size) {
ot_accesslist *accesslist_new = malloc(sizeof(ot_accesslist));
if (accesslist_new) {
accesslist_new->list = size ? malloc(sizeof(ot_hash) * size) : NULL;
accesslist_new->size = size;
@ -101,76 +100,77 @@ static void accesslist_clean(ot_accesslist *accesslist) {
}
/* Read initial access list */
static void accesslist_readfile( void ) {
ot_accesslist * accesslist_new;
ot_hash *info_hash;
const char *map, *map_end, *read_offs;
size_t maplen;
static void accesslist_readfile(void) {
ot_accesslist *accesslist_new;
ot_hash *info_hash;
const char *map, *map_end, *read_offs;
size_t maplen;
if( ( map = mmap_read( g_accesslist_filename, &maplen ) ) == NULL ) {
char *wd = getcwd( NULL, 0 );
fprintf( stderr, "Warning: Can't open accesslist file: %s (but will try to create it later, if necessary and possible).\nPWD: %s\n", g_accesslist_filename, wd );
free( wd );
if ((map = mmap_read(g_accesslist_filename, &maplen)) == NULL) {
char *wd = getcwd(NULL, 0);
fprintf(stderr, "Warning: Can't open accesslist file: %s (but will try to create it later, if necessary and possible).\nPWD: %s\n", g_accesslist_filename, wd);
free(wd);
return;
}
/* You need at least 41 bytes to pass an info_hash, make enough room
for the maximum amount of them */
accesslist_new = accesslist_make(g_accesslist, maplen / 41);
if( !accesslist_new ) {
fprintf( stderr, "Warning: Not enough memory to allocate %zd bytes for accesslist buffer. May succeed later.\n", ( maplen / 41 ) * 20 );
mmap_unmap( map, maplen);
if (!accesslist_new) {
fprintf(stderr, "Warning: Not enough memory to allocate %zd bytes for accesslist buffer. May succeed later.\n", (maplen / 41) * 20);
mmap_unmap(map, maplen);
return;
}
info_hash = accesslist_new->list;
/* No use to scan if there's not enough room for another full info_hash */
map_end = map + maplen - 40;
map_end = map + maplen - 40;
read_offs = map;
/* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*" */
while( read_offs <= map_end ) {
while (read_offs <= map_end) {
int i;
for( i=0; i<(int)sizeof(ot_hash); ++i ) {
int eger1 = scan_fromhex( (unsigned char)read_offs[ 2*i ] );
int eger2 = scan_fromhex( (unsigned char)read_offs[ 1 + 2*i ] );
if( eger1 < 0 || eger2 < 0 )
for (i = 0; i < (int)sizeof(ot_hash); ++i) {
int eger1 = scan_fromhex((unsigned char)read_offs[2 * i]);
int eger2 = scan_fromhex((unsigned char)read_offs[1 + 2 * i]);
if (eger1 < 0 || eger2 < 0)
break;
(*info_hash)[i] = (uint8_t)(eger1 * 16 + eger2);
}
if( i == sizeof(ot_hash) ) {
if (i == sizeof(ot_hash)) {
read_offs += 40;
/* Append accesslist to accesslist vector */
if( read_offs == map_end || scan_fromhex( (unsigned char)*read_offs ) < 0 )
if (read_offs == map_end || scan_fromhex((unsigned char)*read_offs) < 0)
++info_hash;
}
/* Find start of next line */
while( read_offs <= map_end && *(read_offs++) != '\n' );
while (read_offs <= map_end && *(read_offs++) != '\n')
;
}
#ifdef _DEBUG
fprintf( stderr, "Added %zd info_hashes to accesslist\n", (size_t)(info_hash - accesslist_new->list) );
fprintf(stderr, "Added %zd info_hashes to accesslist\n", (size_t)(info_hash - accesslist_new->list));
#endif
mmap_unmap( map, maplen);
mmap_unmap(map, maplen);
qsort( accesslist_new->list, info_hash - accesslist_new->list, sizeof( *info_hash ), vector_compare_hash );
qsort(accesslist_new->list, info_hash - accesslist_new->list, sizeof(*info_hash), vector_compare_hash);
accesslist_new->size = info_hash - accesslist_new->list;
/* Now exchange the accesslist vector in the least race condition prone way */
pthread_mutex_lock(&g_accesslist_mutex);
accesslist_new->next = g_accesslist;
g_accesslist = accesslist_new; /* Only now set a new list */
g_accesslist = accesslist_new; /* Only now set a new list */
#ifdef WANT_DYNAMIC_ACCESSLIST
/* If we have dynamic accesslists, reloading a new one will always void the add/delete lists.
Insert empty ones at the list head */
if (g_accesslist_add && (accesslist_new = accesslist_make(g_accesslist_add, 0)) != NULL)
g_accesslist_add = accesslist_new;
g_accesslist_add = accesslist_new;
if (g_accesslist_delete && (accesslist_new = accesslist_make(g_accesslist_delete, 0)) != NULL)
g_accesslist_delete = accesslist_new;
g_accesslist_delete = accesslist_new;
#endif
accesslist_clean(g_accesslist);
@ -178,26 +178,26 @@ static void accesslist_readfile( void ) {
pthread_mutex_unlock(&g_accesslist_mutex);
}
int accesslist_hashisvalid( ot_hash hash ) {
int accesslist_hashisvalid(ot_hash hash) {
/* Get working copy of current access list */
ot_accesslist * accesslist = g_accesslist;
ot_accesslist *accesslist = g_accesslist;
#ifdef WANT_DYNAMIC_ACCESSLIST
ot_accesslist * accesslist_add, * accesslist_delete;
ot_accesslist *accesslist_add, *accesslist_delete;
#endif
void * exactmatch = NULL;
void *exactmatch = NULL;
if (accesslist)
exactmatch = bsearch( hash, accesslist->list, accesslist->size, OT_HASH_COMPARE_SIZE, vector_compare_hash );
exactmatch = bsearch(hash, accesslist->list, accesslist->size, OT_HASH_COMPARE_SIZE, vector_compare_hash);
#ifdef WANT_DYNAMIC_ACCESSLIST
/* If we had no match on the main list, scan the list of dynamically added hashes */
accesslist_add = g_accesslist_add;
if ((exactmatch == NULL) && accesslist_add)
exactmatch = bsearch( hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash );
exactmatch = bsearch(hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash);
/* If we found a matching hash on the main list, scan the list of dynamically deleted hashes */
accesslist_delete = g_accesslist_delete;
if ((exactmatch != NULL) && accesslist_delete && bsearch( hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash ))
if ((exactmatch != NULL) && accesslist_delete && bsearch(hash, accesslist_add->list, accesslist_add->size, OT_HASH_COMPARE_SIZE, vector_compare_hash))
exactmatch = NULL;
#endif
@ -208,31 +208,32 @@ int accesslist_hashisvalid( ot_hash hash ) {
#endif
}
static void * accesslist_worker( void * args ) {
int sig;
sigset_t signal_mask;
static void *accesslist_worker(void *args) {
int sig;
sigset_t signal_mask;
sigemptyset(&signal_mask);
sigaddset(&signal_mask, SIGHUP);
(void)args;
while( 1 ) {
while (1) {
if (!g_opentracker_running)
return NULL;
return NULL;
/* Initial attempt to read accesslist */
accesslist_readfile( );
accesslist_readfile();
/* Wait for signals */
while( sigwait (&signal_mask, &sig) != 0 && sig != SIGHUP );
while (sigwait(&signal_mask, &sig) != 0 && sig != SIGHUP)
;
}
return NULL;
}
#ifdef WANT_DYNAMIC_ACCESSLIST
static pthread_t thread_adder_id, thread_deleter_id;
static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic * adding_to, ot_accesslist * _Atomic * removing_from) {
static void *accesslist_adddel_worker(char *fifoname, ot_accesslist *_Atomic *adding_to, ot_accesslist *_Atomic *removing_from) {
struct stat st;
if (!stat(fifoname, &st)) {
@ -249,9 +250,9 @@ static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic
}
while (g_opentracker_running) {
FILE * fifo = fopen(fifoname, "r");
char *line = NULL;
size_t linecap = 0;
FILE *fifo = fopen(fifoname, "r");
char *line = NULL;
size_t linecap = 0;
ssize_t linelen;
if (!fifo) {
@ -261,7 +262,7 @@ static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic
while ((linelen = getline(&line, &linecap, fifo)) > 0) {
ot_hash info_hash;
int i;
int i;
printf("Got line %*s", (int)linelen, line);
/* We do ignore anything that is not of the form "^[:xdigit:]{40}[^:xdigit:].*"
@ -269,15 +270,15 @@ static void * accesslist_adddel_worker(char * fifoname, ot_accesslist * _Atomic
if (linelen < 41)
continue;
for( i=0; i<(int)sizeof(ot_hash); ++i ) {
int eger1 = scan_fromhex( (unsigned char)line[ 2*i ] );
int eger2 = scan_fromhex( (unsigned char)line[ 1 + 2*i ] );
if( eger1 < 0 || eger2 < 0 )
for (i = 0; i < (int)sizeof(ot_hash); ++i) {
int eger1 = scan_fromhex((unsigned char)line[2 * i]);
int eger2 = scan_fromhex((unsigned char)line[1 + 2 * i]);
if (eger1 < 0 || eger2 < 0)
break;
((uint8_t*)info_hash)[i] = (uint8_t)(eger1 * 16 + eger2);
((uint8_t *)info_hash)[i] = (uint8_t)(eger1 * 16 + eger2);
}
printf("parsed info_hash %20s\n", info_hash);
if( i != sizeof(ot_hash) )
printf("parsed info_hash %20s\n", info_hash);
if (i != sizeof(ot_hash))
continue;
/* From now on we modify g_accesslist_add and g_accesslist_delete, so prevent the
@ -286,10 +287,10 @@ printf("parsed info_hash %20s\n", info_hash);
/* If the info hash is in the removing_from list, create a new head without that entry */
if (*removing_from && (*removing_from)->list) {
ot_hash * exactmatch = bsearch( info_hash, (*removing_from)->list, (*removing_from)->size, OT_HASH_COMPARE_SIZE, vector_compare_hash );
ot_hash *exactmatch = bsearch(info_hash, (*removing_from)->list, (*removing_from)->size, OT_HASH_COMPARE_SIZE, vector_compare_hash);
if (exactmatch) {
ptrdiff_t off = exactmatch - (*removing_from)->list;
ot_accesslist * accesslist_new = accesslist_make(*removing_from, (*removing_from)->size - 1);
ptrdiff_t off = exactmatch - (*removing_from)->list;
ot_accesslist *accesslist_new = accesslist_make(*removing_from, (*removing_from)->size - 1);
if (accesslist_new) {
memcpy(accesslist_new->list, (*removing_from)->list, sizeof(ot_hash) * off);
memcpy(accesslist_new->list + off, (*removing_from)->list + off + 1, (*removing_from)->size - off - 1);
@ -300,19 +301,19 @@ printf("parsed info_hash %20s\n", info_hash);
/* Simple case: there's no adding_to list yet, create one with one member */
if (!*adding_to) {
ot_accesslist * accesslist_new = accesslist_make(NULL, 1);
ot_accesslist *accesslist_new = accesslist_make(NULL, 1);
if (accesslist_new) {
memcpy(accesslist_new->list, info_hash, sizeof(ot_hash));
*adding_to = accesslist_new;
}
} else {
int exactmatch = 0;
ot_hash * insert_point = binary_search( info_hash, (*adding_to)->list, (*adding_to)->size, OT_HASH_COMPARE_SIZE, sizeof(ot_hash), &exactmatch );
int exactmatch = 0;
ot_hash *insert_point = binary_search(info_hash, (*adding_to)->list, (*adding_to)->size, OT_HASH_COMPARE_SIZE, sizeof(ot_hash), &exactmatch);
/* Only if the info hash is not in the adding_to list, create a new head with that entry */
if (!exactmatch) {
ot_accesslist * accesslist_new = accesslist_make(*adding_to, (*adding_to)->size + 1);
ptrdiff_t off = insert_point - (*adding_to)->list;
ot_accesslist *accesslist_new = accesslist_make(*adding_to, (*adding_to)->size + 1);
ptrdiff_t off = insert_point - (*adding_to)->list;
if (accesslist_new) {
memcpy(accesslist_new->list, (*adding_to)->list, sizeof(ot_hash) * off);
memcpy(accesslist_new->list + off, info_hash, sizeof(info_hash));
@ -330,29 +331,29 @@ printf("parsed info_hash %20s\n", info_hash);
return NULL;
}
static void * accesslist_adder_worker( void * args ) {
static void *accesslist_adder_worker(void *args) {
(void)args;
return accesslist_adddel_worker(g_accesslist_pipe_add, &g_accesslist_add, &g_accesslist_delete);
}
static void * accesslist_deleter_worker( void * args ) {
static void *accesslist_deleter_worker(void *args) {
(void)args;
return accesslist_adddel_worker(g_accesslist_pipe_delete, &g_accesslist_delete, &g_accesslist_add);
}
#endif
static pthread_t thread_id;
void accesslist_init( ) {
void accesslist_init() {
pthread_mutex_init(&g_accesslist_mutex, NULL);
pthread_create( &thread_id, NULL, accesslist_worker, NULL );
pthread_create(&thread_id, NULL, accesslist_worker, NULL);
#ifdef WANT_DYNAMIC_ACCESSLIST
if (g_accesslist_pipe_add)
pthread_create( &thread_adder_id, NULL, accesslist_adder_worker, NULL );
pthread_create(&thread_adder_id, NULL, accesslist_adder_worker, NULL);
if (g_accesslist_pipe_delete)
pthread_create( &thread_deleter_id, NULL, accesslist_deleter_worker, NULL );
pthread_create(&thread_deleter_id, NULL, accesslist_deleter_worker, NULL);
#endif
}
void accesslist_deinit( void ) {
void accesslist_deinit(void) {
/* Wake up sleeping worker */
pthread_kill(thread_id, SIGHUP);
@ -361,16 +362,16 @@ void accesslist_deinit( void ) {
g_accesslist = accesslist_free(g_accesslist);
#ifdef WANT_DYNAMIC_ACCESSLIST
g_accesslist_add = accesslist_free(g_accesslist_add);
g_accesslist_add = accesslist_free(g_accesslist_add);
g_accesslist_delete = accesslist_free(g_accesslist_delete);
#endif
pthread_mutex_unlock(&g_accesslist_mutex);
pthread_cancel( thread_id );
pthread_cancel(thread_id);
pthread_mutex_destroy(&g_accesslist_mutex);
}
void accesslist_cleanup( void ) {
void accesslist_cleanup(void) {
pthread_mutex_lock(&g_accesslist_mutex);
accesslist_clean(g_accesslist);
@ -383,35 +384,34 @@ void accesslist_cleanup( void ) {
}
#endif
int address_in_net( const ot_ip6 address, const ot_net *net ) {
int bits = net->bits;
int result = memcmp( address, &net->address, bits >> 3 );
if( !result && ( bits & 7 ) )
result = ( ( 0x7f00 >> ( bits & 7 ) ) & address[bits>>3] ) - net->address[bits>>3];
int address_in_net(const ot_ip6 address, const ot_net *net) {
int bits = net->bits, checkbits = (0x7f00 >> (bits & 7));
int result = memcmp(address, &net->address, bits >> 3);
if (!result && (bits & 7))
result = (checkbits & address[bits >> 3]) - (checkbits & net->address[bits >> 3]);
return result == 0;
}
void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value, const size_t member_size ) {
void *set_value_for_net(const ot_net *net, ot_vector *vector, const void *value, const size_t member_size) {
size_t i;
int exactmatch;
int exactmatch;
/* Caller must have a concept of ot_net in it's member */
if( member_size < sizeof(ot_net) )
if (member_size < sizeof(ot_net))
return 0;
/* Check each net in vector for overlap */
uint8_t *member = ((uint8_t*)vector->data);
for( i=0; i<vector->size; ++i ) {
if( address_in_net( *(ot_ip6*)member, net ) ||
address_in_net( net->address, (ot_net*)member ) )
uint8_t *member = ((uint8_t *)vector->data);
for (i = 0; i < vector->size; ++i) {
if (address_in_net(*(ot_ip6 *)member, net) || address_in_net(net->address, (ot_net *)member))
return 0;
member += member_size;
}
member = vector_find_or_insert( vector, (void*)net, member_size, sizeof(ot_net), &exactmatch );
if( member ) {
memcpy( member, net, sizeof(ot_net));
memcpy( member + sizeof(ot_net), value, member_size - sizeof(ot_net));
member = vector_find_or_insert(vector, (void *)net, member_size, sizeof(ot_net), &exactmatch);
if (member) {
memcpy(member, net, sizeof(ot_net));
memcpy(member + sizeof(ot_net), value, member_size - sizeof(ot_net));
}
return member;
@ -419,43 +419,43 @@ void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value
/* Takes a vector filled with { ot_net net, uint8_t[x] value };
Returns value associated with the net, or NULL if not found */
void *get_value_for_net( const ot_ip6 address, const ot_vector *vector, const size_t member_size ) {
int exactmatch;
void *get_value_for_net(const ot_ip6 address, const ot_vector *vector, const size_t member_size) {
int exactmatch;
/* This binary search will return a pointer to the first non-containing network... */
ot_net *net = binary_search( address, vector->data, vector->size, member_size, sizeof(ot_ip6), &exactmatch );
if( !net )
ot_net *net = binary_search(address, vector->data, vector->size, member_size, sizeof(ot_ip6), &exactmatch);
if (!net)
return NULL;
/* ... so we'll need to move back one step unless we've exactly hit the first address in network */
if( !exactmatch && ( (void*)net > vector->data ) )
if (!exactmatch && ((void *)net > vector->data))
--net;
if( !address_in_net( address, net ) )
if (!address_in_net(address, net))
return NULL;
return (void*)net;
return (void *)net;
}
#ifdef WANT_FULLLOG_NETWORKS
static ot_vector g_lognets_list;
ot_log *g_logchain_first, *g_logchain_last;
static ot_vector g_lognets_list;
ot_log *g_logchain_first, *g_logchain_last;
static pthread_mutex_t g_lognets_list_mutex = PTHREAD_MUTEX_INITIALIZER;
void loglist_add_network( const ot_net *net ) {
void loglist_add_network(const ot_net *net) {
pthread_mutex_lock(&g_lognets_list_mutex);
set_value_for_net( net, &g_lognets_list, NULL, sizeof(ot_net));
set_value_for_net(net, &g_lognets_list, NULL, sizeof(ot_net));
pthread_mutex_unlock(&g_lognets_list_mutex);
}
void loglist_reset( ) {
void loglist_reset() {
pthread_mutex_lock(&g_lognets_list_mutex);
free( g_lognets_list.data );
free(g_lognets_list.data);
g_lognets_list.data = 0;
g_lognets_list.size = g_lognets_list.space = 0;
pthread_mutex_unlock(&g_lognets_list_mutex);
pthread_mutex_unlock(&g_lognets_list_mutex);
}
int loglist_check_address( const ot_ip6 address ) {
int loglist_check_address(const ot_ip6 address) {
int result;
pthread_mutex_lock(&g_lognets_list_mutex);
result = ( NULL != get_value_for_net( address, &g_lognets_list, sizeof(ot_net)) );
result = (NULL != get_value_for_net(address, &g_lognets_list, sizeof(ot_net)));
pthread_mutex_unlock(&g_lognets_list_mutex);
return result;
}
@ -463,44 +463,44 @@ int loglist_check_address( const ot_ip6 address ) {
#ifdef WANT_IP_FROM_PROXY
typedef struct {
ot_net *proxy;
ot_vector networks;
ot_net *proxy;
ot_vector networks;
} ot_proxymap;
static ot_vector g_proxies_list;
static ot_vector g_proxies_list;
static pthread_mutex_t g_proxies_list_mutex = PTHREAD_MUTEX_INITIALIZER;
int proxylist_add_network( const ot_net *proxy, const ot_net *net ) {
int proxylist_add_network(const ot_net *proxy, const ot_net *net) {
ot_proxymap *map;
int exactmatch, result = 1;
pthread_mutex_lock(&g_proxies_list_mutex);
/* If we have a direct hit, use and extend the vector there */
map = binary_search( proxy, g_proxies_list.data, g_proxies_list.size, sizeof(ot_proxymap), sizeof(ot_net), &exactmatch );
map = binary_search(proxy, g_proxies_list.data, g_proxies_list.size, sizeof(ot_proxymap), sizeof(ot_net), &exactmatch);
if( !map || !exactmatch ) {
if (!map || !exactmatch) {
/* else see, if we've got overlapping networks
and get a new empty vector if not */
ot_vector empty;
memset( &empty, 0, sizeof( ot_vector ) );
map = set_value_for_net( proxy, &g_proxies_list, &empty, sizeof(ot_proxymap));
memset(&empty, 0, sizeof(ot_vector));
map = set_value_for_net(proxy, &g_proxies_list, &empty, sizeof(ot_proxymap));
}
if( map && set_value_for_net( net, &map->networks, NULL, sizeof(ot_net) ) )
result = 1;
if (map && set_value_for_net(net, &map->networks, NULL, sizeof(ot_net)))
result = 1;
pthread_mutex_unlock(&g_proxies_list_mutex);
return result;
}
int proxylist_check_proxy( const ot_ip6 proxy, const ot_ip6 address ) {
int proxylist_check_proxy(const ot_ip6 proxy, const ot_ip6 address) {
int result = 0;
ot_proxymap *map;
pthread_mutex_lock(&g_proxies_list_mutex);
if( ( map = get_value_for_net( proxy, &g_proxies_list, sizeof(ot_proxymap) ) ) )
if( !address || get_value_for_net( address, &map->networks, sizeof(ot_net) ) )
if ((map = get_value_for_net(proxy, &g_proxies_list, sizeof(ot_proxymap))))
if (!address || get_value_for_net(address, &map->networks, sizeof(ot_net)))
result = 1;
pthread_mutex_unlock(&g_proxies_list_mutex);
@ -509,42 +509,53 @@ int proxylist_check_proxy( const ot_ip6 proxy, const ot_ip6 address ) {
#endif
static ot_ip6 g_adminip_addresses[OT_ADMINIP_MAX];
static ot_permissions g_adminip_permissions[OT_ADMINIP_MAX];
static unsigned int g_adminip_count = 0;
static ot_net g_admin_nets[OT_ADMINIP_MAX];
static ot_permissions g_admin_nets_permissions[OT_ADMINIP_MAX];
static unsigned int g_admin_nets_count = 0;
int accesslist_blessip( ot_ip6 ip, ot_permissions permissions ) {
if( g_adminip_count >= OT_ADMINIP_MAX )
int accesslist_bless_net(ot_net *net, ot_permissions permissions) {
if (g_admin_nets_count >= OT_ADMINIP_MAX)
return -1;
memcpy(g_adminip_addresses + g_adminip_count,ip,sizeof(ot_ip6));
g_adminip_permissions[ g_adminip_count++ ] = permissions;
memcpy(g_admin_nets + g_admin_nets_count, net, sizeof(ot_net));
g_admin_nets_permissions[g_admin_nets_count++] = permissions;
#ifdef _DEBUG
{
char _debug[512];
int off = snprintf( _debug, sizeof(_debug), "Blessing ip address " );
off += fmt_ip6c(_debug+off, ip );
int off = snprintf(_debug, sizeof(_debug), "Blessing ip net ");
off += fmt_ip6c(_debug + off, net->address);
if (net->bits < 128) {
_debug[off++] = '/';
if (ip6_isv4mapped(net->address))
off += fmt_long(_debug + off, net->bits - 96);
else
off += fmt_long(_debug + off, net->bits);
}
if( permissions & OT_PERMISSION_MAY_STAT ) off += snprintf( _debug+off, 512-off, " may_fetch_stats" );
if( permissions & OT_PERMISSION_MAY_LIVESYNC ) off += snprintf( _debug+off, 512-off, " may_sync_live" );
if( permissions & OT_PERMISSION_MAY_FULLSCRAPE ) off += snprintf( _debug+off, 512-off, " may_fetch_fullscrapes" );
if( permissions & OT_PERMISSION_MAY_PROXY ) off += snprintf( _debug+off, 512-off, " may_proxy" );
if( !permissions ) off += snprintf( _debug+off, sizeof(_debug)-off, " nothing\n" );
if (permissions & OT_PERMISSION_MAY_STAT)
off += snprintf(_debug + off, 512 - off, " may_fetch_stats");
if (permissions & OT_PERMISSION_MAY_LIVESYNC)
off += snprintf(_debug + off, 512 - off, " may_sync_live");
if (permissions & OT_PERMISSION_MAY_FULLSCRAPE)
off += snprintf(_debug + off, 512 - off, " may_fetch_fullscrapes");
if (permissions & OT_PERMISSION_MAY_PROXY)
off += snprintf(_debug + off, 512 - off, " may_proxy");
if (!permissions)
off += snprintf(_debug + off, sizeof(_debug) - off, " nothing");
_debug[off++] = '.';
(void)write( 2, _debug, off );
_debug[off++] = '\n';
(void)write(2, _debug, off);
}
#endif
return 0;
}
int accesslist_isblessed( ot_ip6 ip, ot_permissions permissions ) {
int accesslist_is_blessed(ot_ip6 ip, ot_permissions permissions) {
unsigned int i;
for( i=0; i<g_adminip_count; ++i )
if( !memcmp( g_adminip_addresses + i, ip, sizeof(ot_ip6)) && ( g_adminip_permissions[ i ] & permissions ) )
for (i = 0; i < g_admin_nets_count; ++i)
if (address_in_net(ip, g_admin_nets + i) && (g_admin_nets_permissions[i] & permissions))
return 1;
return 0;
}
const char *g_version_accesslist_c = "$Source$: $Revision$\n";

View File

@ -6,16 +6,18 @@
#ifndef OT_ACCESSLIST_H__
#define OT_ACCESSLIST_H__
#if defined ( WANT_ACCESSLIST_BLACK ) && defined ( WANT_ACCESSLIST_WHITE )
# error WANT_ACCESSLIST_BLACK and WANT_ACCESSLIST_WHITE are exclusive.
#include "trackerlogic.h"
#if defined(WANT_ACCESSLIST_BLACK) && defined(WANT_ACCESSLIST_WHITE)
#error WANT_ACCESSLIST_BLACK and WANT_ACCESSLIST_WHITE are exclusive.
#endif
#if defined ( WANT_ACCESSLIST_BLACK ) || defined (WANT_ACCESSLIST_WHITE )
#if defined(WANT_ACCESSLIST_BLACK) || defined(WANT_ACCESSLIST_WHITE)
#define WANT_ACCESSLIST
void accesslist_init( void );
void accesslist_deinit( void );
int accesslist_hashisvalid( ot_hash hash );
void accesslist_cleanup( void );
void accesslist_init(void);
void accesslist_deinit(void);
int accesslist_hashisvalid(ot_hash hash);
void accesslist_cleanup(void);
extern char *g_accesslist_filename;
#ifdef WANT_DYNAMIC_ACCESSLIST
@ -25,16 +27,16 @@ extern char *g_accesslist_pipe_delete;
#else
#ifdef WANT_DYNAMIC_ACCESSLIST
# error WANT_DYNAMIC_ACCESSLIST needs either WANT_ACCESSLIST_BLACK or WANT_ACCESSLIST_WHITE
#error WANT_DYNAMIC_ACCESSLIST needs either WANT_ACCESSLIST_BLACK or WANT_ACCESSLIST_WHITE
#endif
#define accesslist_init( accesslist_filename )
#define accesslist_deinit( )
#define accesslist_hashisvalid( hash ) 1
#define accesslist_init(accesslist_filename)
#define accesslist_deinit()
#define accesslist_hashisvalid(hash) 1
#endif
/* Test if an address is subset of an ot_net, return value is considered a bool */
int address_in_net( const ot_ip6 address, const ot_net *net );
int address_in_net(const ot_ip6 address, const ot_net *net);
/* Store a value into a vector of struct { ot_net net, uint8_t[x] value } member;
returns NULL
@ -45,18 +47,17 @@ int address_in_net( const ot_ip6 address, const ot_net *net );
returns pointer to new member in vector for success
member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping
*/
void *set_value_for_net( const ot_net *net, ot_vector *vector, const void *value, const size_t member_size );
void *set_value_for_net(const ot_net *net, ot_vector *vector, const void *value, const size_t member_size);
/* Takes a vector filled with struct { ot_net net, uint8_t[x] value } member;
Returns pointer to _member_ associated with the net, or NULL if not found
member_size can be sizeof(ot_net) to reduce the lookup to a boolean mapping
*/
void *get_value_for_net( const ot_ip6 address, const ot_vector *vector, const size_t member_size );
void *get_value_for_net(const ot_ip6 address, const ot_vector *vector, const size_t member_size);
#ifdef WANT_IP_FROM_PROXY
int proxylist_add_network( const ot_net *proxy, const ot_net *net );
int proxylist_check_network( const ot_ip6 *proxy, const ot_ip6 address /* can be NULL to only check proxy */ );
int proxylist_add_network(const ot_net *proxy, const ot_net *net);
int proxylist_check_network(const ot_ip6 *proxy, const ot_ip6 address /* can be NULL to only check proxy */);
#endif
#ifdef WANT_FULLLOG_NETWORKS
@ -70,10 +71,10 @@ struct ot_log {
};
extern ot_log *g_logchain_first, *g_logchain_last;
void loglist_add_network( const ot_net *net );
void loglist_reset( );
int loglist_check_address( const ot_ip6 address );
#endif
void loglist_add_network(const ot_net *net);
void loglist_reset();
int loglist_check_address(const ot_ip6 address);
#endif
typedef enum {
OT_PERMISSION_MAY_FULLSCRAPE = 0x1,
@ -82,7 +83,7 @@ typedef enum {
OT_PERMISSION_MAY_PROXY = 0x8
} ot_permissions;
int accesslist_blessip( ot_ip6 ip, ot_permissions permissions );
int accesslist_isblessed( ot_ip6 ip, ot_permissions permissions );
int accesslist_bless_net(ot_net *net, ot_permissions permissions);
int accesslist_is_blessed(ot_ip6 ip, ot_permissions permissions);
#endif

View File

@ -5,90 +5,91 @@
/* System */
#include <pthread.h>
#include <unistd.h>
#include <string.h>
#include <unistd.h>
/* Libowfat */
#include "io.h"
/* Opentracker */
#include "trackerlogic.h"
#include "ot_mutex.h"
#include "ot_vector.h"
#include "ot_clean.h"
#include "ot_stats.h"
#include "ot_accesslist.h"
#include "ot_clean.h"
#include "ot_mutex.h"
#include "ot_stats.h"
#include "ot_vector.h"
#include "trackerlogic.h"
/* Returns amount of removed peers */
static ssize_t clean_single_bucket( ot_peer *peers, size_t peer_count, time_t timedout, int *removed_seeders ) {
ot_peer *last_peer = peers + peer_count, *insert_point;
time_t timediff;
static ssize_t clean_single_bucket(ot_peer *peers, size_t peer_count, size_t peer_size, time_t timedout, int *removed_seeders) {
ot_peer *last_peer = peers + peer_count * peer_size, *insert_point;
/* Two scan modes: unless there is one peer removed, just increase ot_peertime */
while( peers < last_peer ) {
if( ( timediff = timedout + OT_PEERTIME( peers ) ) >= OT_PEER_TIMEOUT )
while (peers < last_peer) {
time_t timediff = timedout + OT_PEERTIME(peers, peer_size);
if (timediff >= OT_PEER_TIMEOUT)
break;
OT_PEERTIME( peers++ ) = timediff;
OT_PEERTIME(peers, peer_size) = timediff;
peers += peer_size;
}
/* If we at least remove one peer, we have to copy */
insert_point = peers;
while( peers < last_peer )
if( ( timediff = timedout + OT_PEERTIME( peers ) ) < OT_PEER_TIMEOUT ) {
OT_PEERTIME( peers ) = timediff;
memcpy( insert_point++, peers++, sizeof(ot_peer));
} else
if( OT_PEERFLAG( peers++ ) & PEER_FLAG_SEEDING )
(*removed_seeders)++;
/* If we at least remove one peer, we have to copy */
for (insert_point = peers; peers < last_peer; peers += peer_size) {
time_t timediff = timedout + OT_PEERTIME(peers, peer_size);
return peers - insert_point;
if (timediff < OT_PEER_TIMEOUT) {
OT_PEERTIME(peers, peer_size) = timediff;
memcpy(insert_point, peers, peer_size);
insert_point += peer_size;
} else if (OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING)
(*removed_seeders)++;
}
return (peers - insert_point) / peer_size;
}
/* Clean a single torrent
return 1 if torrent timed out
*/
int clean_single_torrent( ot_torrent *torrent ) {
ot_peerlist *peer_list = torrent->peer_list;
ot_vector *bucket_list = &peer_list->peers;
time_t timedout = (time_t)( g_now_minutes - peer_list->base );
int num_buckets = 1, removed_seeders = 0;
int clean_single_peer_list(ot_peerlist *peer_list, size_t peer_size) {
ot_vector *peer_vector = &peer_list->peers;
time_t timedout = (time_t)(g_now_minutes - peer_list->base);
int num_buckets = 1, removed_seeders = 0;
/* No need to clean empty torrent */
if( !timedout )
if (!timedout)
return 0;
/* Torrent has idled out */
if( timedout > OT_TORRENT_TIMEOUT )
if (timedout > OT_TORRENT_TIMEOUT)
return 1;
/* Nothing to be cleaned here? Test if torrent is worth keeping */
if( timedout > OT_PEER_TIMEOUT ) {
if( !peer_list->peer_count )
if (timedout > OT_PEER_TIMEOUT) {
if (!peer_list->peer_count)
return peer_list->down_count ? 0 : 1;
timedout = OT_PEER_TIMEOUT;
}
if( OT_PEERLIST_HASBUCKETS( peer_list ) ) {
num_buckets = bucket_list->size;
bucket_list = (ot_vector *)bucket_list->data;
if (OT_PEERLIST_HASBUCKETS(peer_list)) {
num_buckets = peer_vector->size;
peer_vector = (ot_vector *)peer_vector->data;
}
while( num_buckets-- ) {
size_t removed_peers = clean_single_bucket( bucket_list->data, bucket_list->size, timedout, &removed_seeders );
while (num_buckets--) {
size_t removed_peers = clean_single_bucket(peer_vector->data, peer_vector->size, peer_size, timedout, &removed_seeders);
peer_list->peer_count -= removed_peers;
bucket_list->size -= removed_peers;
if( bucket_list->size < removed_peers )
vector_fixup_peers( bucket_list );
++bucket_list;
peer_vector->size -= removed_peers;
if (removed_peers)
vector_fixup_peers(peer_vector, peer_size);
/* Skip to next bucket, a vector containing peers */
++peer_vector;
}
peer_list->seed_count -= removed_seeders;
/* See, if we need to convert a torrent from simple vector to bucket list */
if( ( peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT ) || OT_PEERLIST_HASBUCKETS(peer_list) )
vector_redistribute_buckets( peer_list );
/* See if we need to convert a torrent from simple vector to bucket list */
if ((peer_list->peer_count > OT_PEER_BUCKET_MINCOUNT) || OT_PEERLIST_HASBUCKETS(peer_list))
vector_redistribute_buckets(peer_list, peer_size);
if( peer_list->peer_count )
if (peer_list->peer_count)
peer_list->base = g_now_minutes;
else {
/* When we got here, the last time that torrent
@ -96,32 +97,38 @@ int clean_single_torrent( ot_torrent *torrent ) {
peer_list->base = g_now_minutes - OT_PEER_TIMEOUT;
}
return 0;
}
/* Clean a single torrent
return 1 if torrent timed out
*/
int clean_single_torrent(ot_torrent *torrent) {
return clean_single_peer_list(torrent->peer_list6, OT_PEER_SIZE6) * clean_single_peer_list(torrent->peer_list4, OT_PEER_SIZE4);
}
/* Clean up all peers in current bucket, remove timedout pools and
torrents */
static void * clean_worker( void * args ) {
(void) args;
while( 1 ) {
static void *clean_worker(void *args) {
(void)args;
while (1) {
int bucket = OT_BUCKET_COUNT;
while( bucket-- ) {
ot_vector *torrents_list = mutex_bucket_lock( bucket );
while (bucket--) {
ot_vector *torrents_list = mutex_bucket_lock(bucket);
size_t toffs;
int delta_torrentcount = 0;
for( toffs=0; toffs<torrents_list->size; ++toffs ) {
ot_torrent *torrent = ((ot_torrent*)(torrents_list->data)) + toffs;
if( clean_single_torrent( torrent ) ) {
vector_remove_torrent( torrents_list, torrent );
for (toffs = 0; toffs < torrents_list->size; ++toffs) {
ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + toffs;
if (clean_single_torrent(torrent)) {
vector_remove_torrent(torrents_list, torrent);
--delta_torrentcount;
--toffs;
}
}
mutex_bucket_unlock( bucket, delta_torrentcount );
if( !g_opentracker_running )
mutex_bucket_unlock(bucket, delta_torrentcount);
if (!g_opentracker_running)
return NULL;
usleep( OT_CLEAN_SLEEP );
usleep(OT_CLEAN_SLEEP);
}
stats_cleanup();
#ifdef WANT_ACCESSLIST
@ -132,12 +139,6 @@ static void * clean_worker( void * args ) {
}
static pthread_t thread_id;
void clean_init( void ) {
pthread_create( &thread_id, NULL, clean_worker, NULL );
}
void clean_init(void) { pthread_create(&thread_id, NULL, clean_worker, NULL); }
void clean_deinit( void ) {
pthread_cancel( thread_id );
}
const char *g_version_clean_c = "$Source$: $Revision$\n";
void clean_deinit(void) { pthread_cancel(thread_id); }

View File

@ -7,13 +7,13 @@
#define OT_CLEAN_H__
/* The amount of time a clean cycle should take */
#define OT_CLEAN_INTERVAL_MINUTES 2
#define OT_CLEAN_INTERVAL_MINUTES 2
/* So after each bucket wait 1 / OT_BUCKET_COUNT intervals */
#define OT_CLEAN_SLEEP ( ( ( OT_CLEAN_INTERVAL_MINUTES ) * 60 * 1000000 ) / ( OT_BUCKET_COUNT ) )
#define OT_CLEAN_SLEEP (((OT_CLEAN_INTERVAL_MINUTES) * 60 * 1000000) / (OT_BUCKET_COUNT))
void clean_init( void );
void clean_deinit( void );
int clean_single_torrent( ot_torrent *torrent );
void clean_init(void);
void clean_deinit(void);
int clean_single_torrent(ot_torrent *torrent);
#endif

View File

@ -6,14 +6,18 @@
#ifdef WANT_FULLSCRAPE
/* System */
#include <sys/param.h>
#include <arpa/inet.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <arpa/inet.h>
#include <sys/param.h>
#ifdef WANT_COMPRESSION_GZIP
#include <zlib.h>
#endif
#ifdef WANT_COMPRESSION_ZSTD
#include <zstd.h>
#endif
/* Libowfat */
#include "byte.h"
@ -21,50 +25,64 @@
#include "textcode.h"
/* Opentracker */
#include "trackerlogic.h"
#include "ot_mutex.h"
#include "ot_iovec.h"
#include "ot_fullscrape.h"
#include "ot_iovec.h"
#include "ot_mutex.h"
#include "trackerlogic.h"
/* Fetch full scrape info for all torrents
Full scrapes usually are huge and one does not want to
allocate more memory. So lets get them in 512k units
*/
#define OT_SCRAPE_CHUNK_SIZE (1024*1024)
#define OT_SCRAPE_CHUNK_SIZE (1024 * 1024)
/* "d8:completei%zde10:downloadedi%zde10:incompletei%zdee" */
#define OT_SCRAPE_MAXENTRYLEN 256
/* Forward declaration */
static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode );
static void fullscrape_make(int taskid, ot_tasktype mode);
#ifdef WANT_COMPRESSION_GZIP
static void fullscrape_make_gzip( int *iovec_entries, struct iovec **iovector, ot_tasktype mode );
static void fullscrape_make_gzip(int taskid, ot_tasktype mode);
#endif
#ifdef WANT_COMPRESSION_ZSTD
static void fullscrape_make_zstd(int taskid, ot_tasktype mode);
#endif
/* Converter function from memory to human readable hex strings
XXX - Duplicated from ot_stats. Needs fix. */
static char*to_hex(char*d,uint8_t*s){char*m="0123456789ABCDEF";char *t=d;char*e=d+40;while(d<e){*d++=m[*s>>4];*d++=m[*s++&15];}*d=0;return t;}
static char *to_hex(char *d, uint8_t *s) {
char *m = "0123456789ABCDEF";
char *t = d;
char *e = d + 40;
while (d < e) {
*d++ = m[*s >> 4];
*d++ = m[*s++ & 15];
}
*d = 0;
return t;
}
/* This is the entry point into this worker thread
It grabs tasks from mutex_tasklist and delivers results back
*/
static void * fullscrape_worker( void * args ) {
int iovec_entries;
struct iovec *iovector;
static void *fullscrape_worker(void *args) {
(void)args;
(void) args;
while( g_opentracker_running ) {
while (g_opentracker_running) {
ot_tasktype tasktype = TASK_FULLSCRAPE;
ot_taskid taskid = mutex_workqueue_poptask( &tasktype );
#ifdef WANT_COMPRESSION_GZIP
if (tasktype & TASK_FLAG_GZIP)
fullscrape_make_gzip( &iovec_entries, &iovector, tasktype );
ot_taskid taskid = mutex_workqueue_poptask(&tasktype);
#ifdef WANT_COMPRESSION_ZSTD
if (tasktype & TASK_FLAG_ZSTD)
fullscrape_make_zstd(taskid, tasktype);
else
#endif
fullscrape_make( &iovec_entries, &iovector, tasktype );
if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) )
iovec_free( &iovec_entries, &iovector );
#ifdef WANT_COMPRESSION_GZIP
if (tasktype & TASK_FLAG_GZIP)
fullscrape_make_gzip(taskid, tasktype);
else
#endif
fullscrape_make(taskid, tasktype);
mutex_workqueue_pushchunked(taskid, NULL);
}
return NULL;
}
@ -82,76 +100,92 @@ void fullscrape_deliver( int64 sock, ot_tasktype tasktype ) {
mutex_workqueue_pushtask( sock, tasktype );
}
static char * fullscrape_write_one( ot_tasktype mode, char *r, ot_peerlist *peer_list, ot_hash *hash ) {
switch( mode & TASK_TASK_MASK ) {
case TASK_FULLSCRAPE:
default:
/* push hash as bencoded string */
*r++='2'; *r++='0'; *r++=':';
memcpy( r, hash, sizeof(ot_hash) ); r += sizeof(ot_hash);
/* push rest of the scrape string */
r += sprintf( r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", peer_list->seed_count, peer_list->down_count, peer_list->peer_count-peer_list->seed_count );
static char * fullscrape_write_one( ot_tasktype mode, char *r, ot_torrent *torrent, ot_hash *hash ) {
size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count;
break;
case TASK_FULLSCRAPE_TPB_ASCII:
to_hex( r, *hash ); r+= 2 * sizeof(ot_hash);
r += sprintf( r, ":%zd:%zd\n", peer_list->seed_count, peer_list->peer_count-peer_list->seed_count );
break;
case TASK_FULLSCRAPE_TPB_ASCII_PLUS:
to_hex( r, *hash ); r+= 2 * sizeof(ot_hash);
r += sprintf( r, ":%zd:%zd:%zd\n", peer_list->seed_count, peer_list->peer_count-peer_list->seed_count, peer_list->down_count );
break;
case TASK_FULLSCRAPE_TPB_BINARY:
memcpy( r, *hash, sizeof(ot_hash) ); r += sizeof(ot_hash);
*(uint32_t*)(r+0) = htonl( (uint32_t) peer_list->seed_count );
*(uint32_t*)(r+4) = htonl( (uint32_t)( peer_list->peer_count-peer_list->seed_count) );
r+=8;
break;
case TASK_FULLSCRAPE_TPB_URLENCODED:
r += fmt_urlencoded( r, (char *)*hash, 20 );
r += sprintf( r, ":%zd:%zd\n", peer_list->seed_count, peer_list->peer_count-peer_list->seed_count );
break;
case TASK_FULLSCRAPE_TRACKERSTATE:
to_hex( r, *hash ); r+= 2 * sizeof(ot_hash);
r += sprintf( r, ":%zd:%zd\n", peer_list->base, peer_list->down_count );
break;
}
return r;
switch (mode & TASK_TASK_MASK) {
case TASK_FULLSCRAPE:
default:
/* push hash as bencoded string */
*r++ = '2';
*r++ = '0';
*r++ = ':';
memcpy(r, hash, sizeof(ot_hash));
r += sizeof(ot_hash);
/* push rest of the scrape string */
r += sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", seed_count, down_count, peer_count - seed_count);
break;
case TASK_FULLSCRAPE_TPB_ASCII:
to_hex(r, *hash);
r += 2 * sizeof(ot_hash);
r += sprintf(r, ":%zd:%zd\n", seed_count, peer_count - seed_count);
break;
case TASK_FULLSCRAPE_TPB_ASCII_PLUS:
to_hex(r, *hash);
r += 2 * sizeof(ot_hash);
r += sprintf(r, ":%zd:%zd:%zd\n", seed_count, peer_count - seed_count, down_count);
break;
case TASK_FULLSCRAPE_TPB_BINARY:
memcpy(r, *hash, sizeof(ot_hash));
r += sizeof(ot_hash);
*(uint32_t *)(r + 0) = htonl((uint32_t)seed_count);
*(uint32_t *)(r + 4) = htonl((uint32_t)(peer_count - seed_count));
r += 8;
break;
case TASK_FULLSCRAPE_TPB_URLENCODED:
r += fmt_urlencoded(r, (char *)*hash, 20);
r += sprintf(r, ":%zd:%zd\n", seed_count, peer_count - seed_count);
break;
case TASK_FULLSCRAPE_TRACKERSTATE:
to_hex(r, *hash);
r += 2 * sizeof(ot_hash);
r += sprintf(r, ":%zd:%zd\n", torrent->peer_list6->base, down_count);
break;
}
return r;
}
static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ) {
int bucket;
char *r, *re;
static void fullscrape_make(int taskid, ot_tasktype mode) {
int bucket;
char *r, *re;
struct iovec iovector = {NULL, 0};
/* Setup return vector... */
*iovec_entries = 0;
*iovector = NULL;
r = iovec_increase( iovec_entries, iovector, OT_SCRAPE_CHUNK_SIZE );
if( !r )
r = iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
if (!r)
return;
/* re points to low watermark */
re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN;
if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE )
r += sprintf( r, "d5:filesd" );
if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE)
r += sprintf(r, "d5:filesd");
/* For each bucket... */
for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) {
for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
/* Get exclusive access to that bucket */
ot_vector *torrents_list = mutex_bucket_lock( bucket );
ot_torrent *torrents = (ot_torrent*)(torrents_list->data);
size_t i;
ot_vector *torrents_list = mutex_bucket_lock(bucket);
ot_torrent *torrents = (ot_torrent *)(torrents_list->data);
size_t i;
/* For each torrent in this bucket.. */
for( i=0; i<torrents_list->size; ++i ) {
r = fullscrape_write_one( mode, r, torrents[i].peer_list, &torrents[i].hash );
for (i = 0; i < torrents_list->size; ++i) {
r = fullscrape_write_one(mode, r, torrents + i, &torrents[i].hash);
if( r > re) {
/* Allocate a fresh output buffer at the end of our buffers list */
r = iovec_fix_increase_or_free( iovec_entries, iovector, r, OT_SCRAPE_CHUNK_SIZE );
if( !r )
return mutex_bucket_unlock( bucket, 0 );
if (r > re) {
iovector.iov_len = r - (char *)iovector.iov_base;
if (mutex_workqueue_pushchunked(taskid, &iovector)) {
free(iovector.iov_base);
return mutex_bucket_unlock(bucket, 0);
}
/* Allocate a fresh output buffer */
r = iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
if (!r)
return mutex_bucket_unlock(bucket, 0);
/* re points to low watermark */
re = r + OT_SCRAPE_CHUNK_SIZE - OT_SCRAPE_MAXENTRYLEN;
@ -159,125 +193,265 @@ static void fullscrape_make( int *iovec_entries, struct iovec **iovector, ot_tas
}
/* All torrents done: release lock on current bucket */
mutex_bucket_unlock( bucket, 0 );
mutex_bucket_unlock(bucket, 0);
/* Parent thread died? */
if( !g_opentracker_running )
if (!g_opentracker_running)
return;
}
if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE )
r += sprintf( r, "ee" );
if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE)
r += sprintf(r, "ee");
/* Release unused memory in current output buffer */
iovec_fixlast( iovec_entries, iovector, r );
/* Send rest of data */
iovector.iov_len = r - (char *)iovector.iov_base;
if (mutex_workqueue_pushchunked(taskid, &iovector))
free(iovector.iov_base);
}
#ifdef WANT_COMPRESSION_GZIP
static void fullscrape_make_gzip( int *iovec_entries, struct iovec **iovector, ot_tasktype mode ) {
int bucket;
char *r;
int zres;
z_stream strm;
static void fullscrape_make_gzip(int taskid, ot_tasktype mode) {
int bucket;
char *r;
struct iovec iovector = {NULL, 0};
int zres;
z_stream strm;
/* Setup return vector... */
*iovec_entries = 0;
*iovector = NULL;
r = iovec_increase( iovec_entries, iovector, OT_SCRAPE_CHUNK_SIZE );
if( !r )
iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
if (!iovector.iov_base)
return;
byte_zero( &strm, sizeof(strm) );
strm.next_out = (uint8_t*)r;
byte_zero(&strm, sizeof(strm));
strm.next_out = (uint8_t *)iovector.iov_base;
strm.avail_out = OT_SCRAPE_CHUNK_SIZE;
if( deflateInit2(&strm,7,Z_DEFLATED,31,9,Z_DEFAULT_STRATEGY) != Z_OK )
fprintf( stderr, "not ok.\n" );
if (deflateInit2(&strm, 7, Z_DEFLATED, 31, 9, Z_DEFAULT_STRATEGY) != Z_OK)
fprintf(stderr, "not ok.\n");
if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) {
strm.next_in = (uint8_t*)"d5:filesd";
strm.avail_in = strlen("d5:filesd");
zres = deflate( &strm, Z_NO_FLUSH );
if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) {
strm.next_in = (uint8_t *)"d5:filesd";
strm.avail_in = strlen("d5:filesd");
zres = deflate(&strm, Z_NO_FLUSH);
}
/* For each bucket... */
for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) {
for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
/* Get exclusive access to that bucket */
ot_vector *torrents_list = mutex_bucket_lock( bucket );
ot_torrent *torrents = (ot_torrent*)(torrents_list->data);
size_t i;
ot_vector *torrents_list = mutex_bucket_lock(bucket);
ot_torrent *torrents = (ot_torrent *)(torrents_list->data);
size_t i;
/* For each torrent in this bucket.. */
for( i=0; i<torrents_list->size; ++i ) {
for (i = 0; i < torrents_list->size; ++i) {
char compress_buffer[OT_SCRAPE_MAXENTRYLEN];
r = fullscrape_write_one( mode, compress_buffer, torrents[i].peer_list, &torrents[i].hash );
strm.next_in = (uint8_t*)compress_buffer;
strm.avail_in = r - compress_buffer;
zres = deflate( &strm, Z_NO_FLUSH );
if( ( zres < Z_OK ) && ( zres != Z_BUF_ERROR ) )
fprintf( stderr, "deflate() failed while in fullscrape_make().\n" );
r = fullscrape_write_one(mode, compress_buffer, torrents + i, &torrents[i].hash);
strm.next_in = (uint8_t *)compress_buffer;
strm.avail_in = r - compress_buffer;
zres = deflate(&strm, Z_NO_FLUSH);
if ((zres < Z_OK) && (zres != Z_BUF_ERROR))
fprintf(stderr, "deflate() failed while in fullscrape_make().\n");
/* Check if there still is enough buffer left */
while( !strm.avail_out ) {
/* Allocate a fresh output buffer at the end of our buffers list */
r = iovec_increase( iovec_entries, iovector, OT_SCRAPE_CHUNK_SIZE );
if( !r ) {
fprintf( stderr, "Problem with iovec_fix_increase_or_free\n" );
iovec_free( iovec_entries, iovector );
deflateEnd(&strm);
return mutex_bucket_unlock( bucket, 0 );
while (!strm.avail_out) {
iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base;
if (mutex_workqueue_pushchunked(taskid, &iovector)) {
free(iovector.iov_base);
return mutex_bucket_unlock(bucket, 0);
}
strm.next_out = (uint8_t*)r;
/* Allocate a fresh output buffer */
iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
if (!iovector.iov_base) {
fprintf(stderr, "Out of memory trying to claim ouput buffer\n");
deflateEnd(&strm);
return mutex_bucket_unlock(bucket, 0);
}
strm.next_out = (uint8_t *)iovector.iov_base;
strm.avail_out = OT_SCRAPE_CHUNK_SIZE;
zres = deflate( &strm, Z_NO_FLUSH );
if( ( zres < Z_OK ) && ( zres != Z_BUF_ERROR ) )
fprintf( stderr, "deflate() failed while in fullscrape_make().\n" );
zres = deflate(&strm, Z_NO_FLUSH);
if ((zres < Z_OK) && (zres != Z_BUF_ERROR))
fprintf(stderr, "deflate() failed while in fullscrape_make().\n");
}
}
/* All torrents done: release lock on current bucket */
mutex_bucket_unlock( bucket, 0 );
mutex_bucket_unlock(bucket, 0);
/* Parent thread died? */
if( !g_opentracker_running )
if (!g_opentracker_running) {
deflateEnd(&strm);
return;
}
}
if( ( mode & TASK_TASK_MASK ) == TASK_FULLSCRAPE ) {
strm.next_in = (uint8_t*)"ee";
if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) {
strm.next_in = (uint8_t *)"ee";
strm.avail_in = strlen("ee");
}
if( deflate( &strm, Z_FINISH ) < Z_OK )
fprintf( stderr, "deflate() failed while in fullscrape_make()'s endgame.\n" );
if (deflate(&strm, Z_FINISH) < Z_OK)
fprintf(stderr, "deflate() failed while in fullscrape_make()'s endgame.\n");
if( !strm.avail_out ) {
unsigned int pending;
int bits;
deflatePending( &strm, &pending, &bits);
pending += ( bits ? 1 : 0 );
/* Allocate a fresh output buffer at the end of our buffers list */
r = iovec_fix_increase_or_free( iovec_entries, iovector, strm.next_out, pending );
if( !r ) {
fprintf( stderr, "Problem with iovec_fix_increase_or_free\n" );
deflateEnd(&strm);
return mutex_bucket_unlock( bucket, 0 );
}
strm.next_out = (uint8_t*)r;
strm.avail_out = pending;
if( deflate( &strm, Z_FINISH ) < Z_OK )
fprintf( stderr, "deflate() failed while in fullscrape_make()'s endgame.\n" );
iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base;
if (mutex_workqueue_pushchunked(taskid, &iovector)) {
free(iovector.iov_base);
deflateEnd(&strm);
return;
}
/* Release unused memory in current output buffer */
iovec_fixlast( iovec_entries, iovector, strm.next_out );
/* Check if there's a last batch of data in the zlib buffer */
if (!strm.avail_out) {
/* Allocate a fresh output buffer */
iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
if (!iovector.iov_base) {
fprintf(stderr, "Problem with iovec_fix_increase_or_free\n");
deflateEnd(&strm);
return;
}
strm.next_out = iovector.iov_base;
strm.avail_out = OT_SCRAPE_CHUNK_SIZE;
if (deflate(&strm, Z_FINISH) < Z_OK)
fprintf(stderr, "deflate() failed while in fullscrape_make()'s endgame.\n");
/* Only pass the new buffer if there actually was some data left in the buffer */
iovector.iov_len = (char *)strm.next_out - (char *)iovector.iov_base;
if (!iovector.iov_len || mutex_workqueue_pushchunked(taskid, &iovector))
free(iovector.iov_base);
}
deflateEnd(&strm);
}
/* WANT_COMPRESSION_GZIP */
#endif
#ifdef WANT_COMPRESSION_ZSTD
/* Produce a full scrape of every torrent, compressing the bencoded output
   with zstd and handing each finished buffer to the workqueue in chunks.

   taskid  identifies the task the pushed chunks belong to
   mode    task flags; TASK_FULLSCRAPE wraps the entries in "d5:filesd ... ee"

   Fixes over the previous revision:
   - inbuf is zero-initialised so the final ZSTD_e_end flush is well defined
     even when no input was ever provided (empty tracker, non-fullscrape mode)
   - mid-stream chunks are pushed with length outbuf.pos (bytes actually
     written), not outbuf.size, which shipped uninitialised trailing bytes
   - the shutdown path releases the output buffer and the zstd context */
static void fullscrape_make_zstd(int taskid, ot_tasktype mode) {
  int            bucket;
  char          *r;
  struct iovec   iovector = {NULL, 0};
  ZSTD_CCtx     *zstream  = ZSTD_createCCtx();
  ZSTD_inBuffer  inbuf    = {NULL, 0, 0};
  ZSTD_outBuffer outbuf;
  size_t         more_bytes;

  if (!zstream)
    return;

  /* Setup return vector... */
  iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
  if (!iovector.iov_base) {
    ZSTD_freeCCtx(zstream);
    return;
  }

  /* Working with a compression level 6 is half as fast as level 3, but
     seems to be the last reasonable bump that's worth extra cpu */
  ZSTD_CCtx_setParameter(zstream, ZSTD_c_compressionLevel, 6);

  outbuf.dst  = iovector.iov_base;
  outbuf.size = OT_SCRAPE_CHUNK_SIZE;
  outbuf.pos  = 0;

  if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) {
    /* Open the bencoded "files" dictionary */
    inbuf.src  = (const void *)"d5:filesd";
    inbuf.size = strlen("d5:filesd");
    inbuf.pos  = 0;
    ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_continue);
  }

  /* For each bucket... */
  for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
    /* Get exclusive access to that bucket */
    ot_vector  *torrents_list = mutex_bucket_lock(bucket);
    ot_torrent *torrents      = (ot_torrent *)(torrents_list->data);
    size_t      i;

    /* For each torrent in this bucket.. */
    for (i = 0; i < torrents_list->size; ++i) {
      char compress_buffer[OT_SCRAPE_MAXENTRYLEN];
      r          = fullscrape_write_one(mode, compress_buffer, torrents + i, &torrents[i].hash);
      inbuf.src  = compress_buffer;
      inbuf.size = r - compress_buffer;
      inbuf.pos  = 0;
      ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_continue);

      /* Check if there still is enough buffer left for a worst case entry */
      while (outbuf.pos + OT_SCRAPE_MAXENTRYLEN > outbuf.size) {
        /* Push only the bytes actually produced so far */
        iovector.iov_len = outbuf.pos;
        if (mutex_workqueue_pushchunked(taskid, &iovector)) {
          free(iovector.iov_base);
          ZSTD_freeCCtx(zstream);
          mutex_bucket_unlock(bucket, 0);
          return;
        }
        /* Allocate a fresh output buffer */
        iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
        if (!iovector.iov_base) {
          fprintf(stderr, "Out of memory trying to claim output buffer\n");
          ZSTD_freeCCtx(zstream);
          mutex_bucket_unlock(bucket, 0);
          return;
        }
        outbuf.dst  = iovector.iov_base;
        outbuf.size = OT_SCRAPE_CHUNK_SIZE;
        outbuf.pos  = 0;
        /* Drain any input left over from the push above */
        ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_continue);
      }
    }

    /* All torrents done: release lock on current bucket */
    mutex_bucket_unlock(bucket, 0);

    /* Parent thread died? Clean up before bailing out. */
    if (!g_opentracker_running) {
      free(iovector.iov_base);
      ZSTD_freeCCtx(zstream);
      return;
    }
  }

  if ((mode & TASK_TASK_MASK) == TASK_FULLSCRAPE) {
    /* Close the bencoded "files" dictionary */
    inbuf.src  = (const void *)"ee";
    inbuf.size = strlen("ee");
    inbuf.pos  = 0;
  }
  more_bytes = ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_end);

  iovector.iov_len = outbuf.pos;
  if (mutex_workqueue_pushchunked(taskid, &iovector)) {
    free(iovector.iov_base);
    ZSTD_freeCCtx(zstream);
    return;
  }

  /* Check if there's a last batch of data in the zstd buffer */
  if (more_bytes) {
    /* Allocate a fresh output buffer */
    iovector.iov_base = malloc(OT_SCRAPE_CHUNK_SIZE);
    if (!iovector.iov_base) {
      fprintf(stderr, "Out of memory trying to claim output buffer\n");
      ZSTD_freeCCtx(zstream);
      return;
    }
    outbuf.dst  = iovector.iov_base;
    outbuf.size = OT_SCRAPE_CHUNK_SIZE;
    outbuf.pos  = 0;
    ZSTD_compressStream2(zstream, &outbuf, &inbuf, ZSTD_e_end);

    /* Only pass the new buffer if there actually was some data left in the buffer */
    iovector.iov_len = outbuf.pos;
    if (!iovector.iov_len || mutex_workqueue_pushchunked(taskid, &iovector))
      free(iovector.iov_base);
  }

  ZSTD_freeCCtx(zstream);
}
/* WANT_COMPRESSION_ZSTD */
#endif
/* WANT_FULLSCRAPE */
#endif
const char *g_version_fullscrape_c = "$Source$: $Revision$\n";

View File

@ -8,9 +8,11 @@
#ifdef WANT_FULLSCRAPE
void fullscrape_init( );
void fullscrape_deinit( );
void fullscrape_deliver( int64 sock, ot_tasktype tasktype );
#include "ot_mutex.h"
void fullscrape_init();
void fullscrape_deinit();
void fullscrape_deliver(int64 sock, ot_tasktype tasktype);
#else

850
ot_http.c

File diff suppressed because it is too large Load Diff

View File

@ -7,9 +7,12 @@
#define OT_HTTP_H__
typedef enum {
STRUCT_HTTP_FLAG_WAITINGFORTASK = 1,
STRUCT_HTTP_FLAG_GZIP = 2,
STRUCT_HTTP_FLAG_BZIP2 = 4
STRUCT_HTTP_FLAG_WAITINGFORTASK = 1,
STRUCT_HTTP_FLAG_GZIP = 2,
STRUCT_HTTP_FLAG_BZIP2 = 4,
STRUCT_HTTP_FLAG_ZSTD = 8,
STRUCT_HTTP_FLAG_CHUNKED = 16,
STRUCT_HTTP_FLAG_CHUNKED_IN_TRANSFER = 32
} STRUCT_HTTP_FLAG;
struct http_data {
@ -20,9 +23,9 @@ struct http_data {
STRUCT_HTTP_FLAG flag;
};
ssize_t http_handle_request( const int64 s, struct ot_workstruct *ws );
ssize_t http_sendiovecdata( const int64 s, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector );
ssize_t http_issue_error( const int64 s, struct ot_workstruct *ws, int code );
ssize_t http_handle_request(const int64 s, struct ot_workstruct *ws);
ssize_t http_sendiovecdata(const int64 s, struct ot_workstruct *ws, int iovec_entries, struct iovec *iovector, int is_partial);
ssize_t http_issue_error(const int64 s, struct ot_workstruct *ws, int code);
extern char *g_stats_path;
extern ssize_t g_stats_path_len;

View File

@ -4,73 +4,89 @@
$id$ */
/* System */
#include <sys/types.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <unistd.h>
/* Libowfat */
/* Opentracker */
#include "ot_iovec.h"
void *iovec_increase( int *iovec_entries, struct iovec **iovector, size_t new_alloc ) {
void *new_data;
int new_entries = 1 + *iovec_entries;
struct iovec *new_vec = realloc( *iovector, new_entries * sizeof( struct iovec ) );
void *iovec_increase(int *iovec_entries, struct iovec **iovector, size_t new_alloc) {
void *new_data;
int new_entries = 1 + *iovec_entries;
struct iovec *new_vec = realloc(*iovector, new_entries * sizeof(struct iovec));
if( !new_vec )
if (!new_vec)
return NULL;
/* Only allocate after we have a place to store the pointer */
new_data = malloc( new_alloc );
if( !new_data )
new_data = malloc(new_alloc);
if (!new_data)
return NULL;
new_vec[new_entries - 1].iov_base = new_data;
new_vec[new_entries - 1].iov_len = new_alloc;
*iovector = new_vec;
*iovector = new_vec;
++*iovec_entries;
return new_data;
}
void iovec_free( int *iovec_entries, struct iovec **iovector ) {
/* Grow the iovec array by one slot and move ownership of *append_iovector's
   buffer into that slot.  On success the source iovec is cleared (base NULL,
   len 0) and the possibly relocated array is returned; on allocation failure
   NULL is returned and both the array and the source iovec stay untouched. */
void *iovec_append(int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector) {
  int           count = *iovec_entries;
  struct iovec *grown = realloc(*iovector, (count + 1) * sizeof(struct iovec));

  if (!grown)
    return NULL;

  /* Take over data from appended iovec */
  grown[count]              = *append_iovector;
  append_iovector->iov_base = NULL;
  append_iovector->iov_len  = 0;

  *iovector      = grown;
  *iovec_entries = count + 1;
  return grown;
}
void iovec_free(int *iovec_entries, struct iovec **iovector) {
int i;
for( i=0; i<*iovec_entries; ++i )
free( ((*iovector)[i]).iov_base );
*iovector = NULL;
for (i = 0; i < *iovec_entries; ++i)
free(((*iovector)[i]).iov_base);
*iovector = NULL;
*iovec_entries = 0;
}
void iovec_fixlast( int *iovec_entries, struct iovec **iovector, void *last_ptr ) {
if( *iovec_entries ) {
char * base = (char*)((*iovector)[ *iovec_entries - 1 ]).iov_base;
size_t new_alloc = ((char*)last_ptr) - base;
void iovec_fixlast(int *iovec_entries, struct iovec **iovector, void *last_ptr) {
if (*iovec_entries) {
char *base = (char *)((*iovector)[*iovec_entries - 1]).iov_base;
size_t new_alloc = ((char *)last_ptr) - base;
((*iovector)[*iovec_entries - 1 ]).iov_base = realloc( base, new_alloc );
((*iovector)[*iovec_entries - 1 ]).iov_len = new_alloc;
((*iovector)[*iovec_entries - 1]).iov_base = realloc(base, new_alloc);
((*iovector)[*iovec_entries - 1]).iov_len = new_alloc;
}
}
void *iovec_fix_increase_or_free( int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc ) {
void *iovec_fix_increase_or_free(int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc) {
void *new_data;
iovec_fixlast( iovec_entries, iovector, last_ptr );
iovec_fixlast(iovec_entries, iovector, last_ptr);
if( !( new_data = iovec_increase( iovec_entries, iovector, new_alloc ) ) )
iovec_free( iovec_entries, iovector );
if (!(new_data = iovec_increase(iovec_entries, iovector, new_alloc)))
iovec_free(iovec_entries, iovector);
return new_data;
}
size_t iovec_length( const int *iovec_entries, const struct iovec **iovector ) {
size_t iovec_length(const int *iovec_entries, const struct iovec **iovector) {
size_t length = 0;
int i;
for( i=0; i<*iovec_entries; ++i )
int i;
for (i = 0; i < *iovec_entries; ++i)
length += ((*iovector)[i]).iov_len;
return length;
}
const char *g_version_iovec_c = "$Source$: $Revision$\n";

View File

@ -8,12 +8,13 @@
#include <sys/uio.h>
void *iovec_increase( int *iovec_entries, struct iovec **iovector, size_t new_alloc );
void iovec_fixlast( int *iovec_entries, struct iovec **iovector, void *last_ptr );
void iovec_free( int *iovec_entries, struct iovec **iovector );
void *iovec_increase(int *iovec_entries, struct iovec **iovector, size_t new_alloc);
void *iovec_append(int *iovec_entries, struct iovec **iovector, struct iovec *append_iovector);
void iovec_fixlast(int *iovec_entries, struct iovec **iovector, void *last_ptr);
void iovec_free(int *iovec_entries, struct iovec **iovector);
size_t iovec_length( const int *iovec_entries, const struct iovec **iovector );
size_t iovec_length(const int *iovec_entries, const struct iovec **iovector);
void *iovec_fix_increase_or_free( int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc );
void *iovec_fix_increase_or_free(int *iovec_entries, struct iovec **iovector, void *last_ptr, size_t new_alloc);
#endif

View File

@ -4,204 +4,228 @@
$id$ */
/* System */
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <string.h>
#include <pthread.h>
#include <unistd.h>
#include <stdlib.h>
/* Libowfat */
#include "socket.h"
#include "ndelay.h"
#include "byte.h"
#include "ip6.h"
#include "ndelay.h"
#include "socket.h"
/* Opentracker */
#include "trackerlogic.h"
#include "ot_livesync.h"
#include "ot_accesslist.h"
#include "ot_stats.h"
#include "ot_livesync.h"
#include "ot_mutex.h"
#include "ot_stats.h"
#include "trackerlogic.h"
#ifdef WANT_SYNC_LIVE
char groupip_1[4] = { 224,0,23,5 };
char groupip_1[4] = {224, 0, 23, 5};
#define LIVESYNC_INCOMING_BUFFSIZE (256*256)
#define LIVESYNC_INCOMING_BUFFSIZE (256 * 256)
#define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480
#define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer)+sizeof(ot_hash))
#define LIVESYNC_OUTGOING_BUFFSIZE_PEERS 1480
#define LIVESYNC_OUTGOING_WATERMARK_PEERS (sizeof(ot_peer) + sizeof(ot_hash))
#define LIVESYNC_MAXDELAY 15 /* seconds */
#define LIVESYNC_MAXDELAY 15 /* seconds */
enum { OT_SYNC_PEER };
enum { OT_SYNC_PEER4, OT_SYNC_PEER6 };
/* Forward declaration */
static void * livesync_worker( void * args );
static void *livesync_worker(void *args);
/* For outgoing packets */
static int64 g_socket_in = -1;
static int64 g_socket_in = -1;
/* For incoming packets */
static int64 g_socket_out = -1;
static int64 g_socket_out = -1;
static pthread_mutex_t g_outbuf_mutex = PTHREAD_MUTEX_INITIALIZER;
char g_outbuf[LIVESYNC_OUTGOING_BUFFSIZE_PEERS];
static size_t g_outbuf_data;
static ot_time g_next_packet_time;
typedef struct {
uint8_t data[LIVESYNC_OUTGOING_BUFFSIZE_PEERS];
size_t fill;
ot_time next_packet_time;
} sync_buffer;
static pthread_t thread_id;
void livesync_init( ) {
static sync_buffer g_v6_buf;
static sync_buffer g_v4_buf;
if( g_socket_in == -1 )
exerr( "No socket address for live sync specified." );
static pthread_t thread_id;
void livesync_init() {
if (g_socket_in == -1)
exerr("No socket address for live sync specified.");
/* Prepare outgoing peers buffer */
memcpy( g_outbuf, &g_tracker_id, sizeof( g_tracker_id ) );
uint32_pack_big( g_outbuf + sizeof( g_tracker_id ), OT_SYNC_PEER);
g_outbuf_data = sizeof( g_tracker_id ) + sizeof( uint32_t );
memcpy(g_v6_buf.data, &g_tracker_id, sizeof(g_tracker_id));
memcpy(g_v4_buf.data, &g_tracker_id, sizeof(g_tracker_id));
g_next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
uint32_pack_big((char *)g_v6_buf.data + sizeof(g_tracker_id), OT_SYNC_PEER6);
uint32_pack_big((char *)g_v4_buf.data + sizeof(g_tracker_id), OT_SYNC_PEER4);
pthread_create( &thread_id, NULL, livesync_worker, NULL );
g_v6_buf.fill = sizeof(g_tracker_id) + sizeof(uint32_t);
g_v4_buf.fill = sizeof(g_tracker_id) + sizeof(uint32_t);
g_v6_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
g_v4_buf.next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
pthread_create(&thread_id, NULL, livesync_worker, NULL);
}
void livesync_deinit() {
if( g_socket_in != -1 )
close( g_socket_in );
if( g_socket_out != -1 )
close( g_socket_out );
if (g_socket_in != -1)
close(g_socket_in);
if (g_socket_out != -1)
close(g_socket_out);
pthread_cancel( thread_id );
pthread_cancel(thread_id);
}
void livesync_bind_mcast( ot_ip6 ip, uint16_t port) {
char tmpip[4] = {0,0,0,0};
void livesync_bind_mcast(ot_ip6 ip, uint16_t port) {
char tmpip[4] = {0, 0, 0, 0};
char *v4ip;
if( !ip6_isv4mapped(ip))
if (!ip6_isv4mapped(ip))
exerr("v6 mcast support not yet available.");
v4ip = ip+12;
v4ip = ip + 12;
if( g_socket_in != -1 )
if (g_socket_in != -1)
exerr("Error: Livesync listen ip specified twice.");
if( ( g_socket_in = socket_udp4( )) < 0)
exerr("Error: Cant create live sync incoming socket." );
if ((g_socket_in = socket_udp4()) < 0)
exerr("Error: Cant create live sync incoming socket.");
ndelay_off(g_socket_in);
if( socket_bind4_reuse( g_socket_in, tmpip, port ) == -1 )
exerr("Error: Cant bind live sync incoming socket." );
if (socket_bind4_reuse(g_socket_in, tmpip, port) == -1)
exerr("Error: Cant bind live sync incoming socket.");
if( socket_mcjoin4( g_socket_in, groupip_1, v4ip ) )
if (socket_mcjoin4(g_socket_in, groupip_1, v4ip))
exerr("Error: Cant make live sync incoming socket join mcast group.");
if( ( g_socket_out = socket_udp4()) < 0)
exerr("Error: Cant create live sync outgoing socket." );
if( socket_bind4_reuse( g_socket_out, v4ip, port ) == -1 )
exerr("Error: Cant bind live sync outgoing socket." );
if ((g_socket_out = socket_udp4()) < 0)
exerr("Error: Cant create live sync outgoing socket.");
if (socket_bind4_reuse(g_socket_out, v4ip, port) == -1)
exerr("Error: Cant bind live sync outgoing socket.");
socket_mcttl4(g_socket_out, 1);
socket_mcloop4(g_socket_out, 0);
}
/* Caller MUST hold g_outbuf_mutex. Returns with g_outbuf_mutex unlocked */
static void livesync_issue_peersync( ) {
static void livesync_issue_peersync(sync_buffer *buf) {
char mycopy[LIVESYNC_OUTGOING_BUFFSIZE_PEERS];
size_t data = g_outbuf_data;
size_t fill = buf->fill;
memcpy( mycopy, g_outbuf, data );
g_outbuf_data = sizeof( g_tracker_id ) + sizeof( uint32_t );
g_next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
memcpy(mycopy, buf->data, fill);
buf->fill = sizeof(g_tracker_id) + sizeof(uint32_t);
buf->next_packet_time = g_now_seconds + LIVESYNC_MAXDELAY;
/* From now this thread has a local copy of the buffer and
has modified the protected element */
pthread_mutex_unlock(&g_outbuf_mutex);
socket_send4(g_socket_out, mycopy, data, groupip_1, LIVESYNC_PORT);
socket_send4(g_socket_out, mycopy, fill, groupip_1, LIVESYNC_PORT);
}
static void livesync_handle_peersync( struct ot_workstruct *ws ) {
int off = sizeof( g_tracker_id ) + sizeof( uint32_t );
static void livesync_handle_peersync(struct ot_workstruct *ws, size_t peer_size) {
size_t off = sizeof(g_tracker_id) + sizeof(uint32_t);
/* Now basic sanity checks have been done on the live sync packet
We might add more testing and logging. */
while( off + (ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer ) <= ws->request_size ) {
memcpy( &ws->peer, ws->request + off + sizeof(ot_hash), sizeof( ot_peer ) );
ws->hash = (ot_hash*)(ws->request + off);
while ((ssize_t)(off + sizeof(ot_hash) + peer_size) <= ws->request_size) {
memcpy(&ws->peer, ws->request + off + sizeof(ot_hash), peer_size);
ws->hash = (ot_hash *)(ws->request + off);
if( !g_opentracker_running ) return;
if (!g_opentracker_running)
return;
if( OT_PEERFLAG(&ws->peer) & PEER_FLAG_STOPPED )
remove_peer_from_torrent( FLAG_MCA, ws );
if (OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED)
remove_peer_from_torrent(FLAG_MCA, ws);
else
add_peer_to_torrent_and_return_peers( FLAG_MCA, ws, /* amount = */ 0 );
add_peer_to_torrent_and_return_peers(FLAG_MCA, ws, /* amount = */ 0);
off += sizeof( ot_hash ) + sizeof( ot_peer );
off += sizeof(ot_hash) + peer_size;
}
stats_issue_event(EVENT_SYNC, 0,
(ws->request_size - sizeof( g_tracker_id ) - sizeof( uint32_t ) ) /
((ssize_t)sizeof( ot_hash ) + (ssize_t)sizeof( ot_peer )));
stats_issue_event(EVENT_SYNC, 0, (ws->request_size - sizeof(g_tracker_id) - sizeof(uint32_t)) / ((ssize_t)sizeof(ot_hash) + peer_size));
}
/* Tickle the live sync module from time to time, so no events get
stuck when there's not enough traffic to fill udp packets fast
enough */
void livesync_ticker( ) {
void livesync_ticker() {
/* livesync_issue_peersync sets g_next_packet_time */
pthread_mutex_lock(&g_outbuf_mutex);
if( g_now_seconds > g_next_packet_time &&
g_outbuf_data > sizeof( g_tracker_id ) + sizeof( uint32_t ) )
livesync_issue_peersync();
if (g_now_seconds > g_v6_buf.next_packet_time && g_v6_buf.fill > sizeof(g_tracker_id) + sizeof(uint32_t))
livesync_issue_peersync(&g_v6_buf);
else
pthread_mutex_unlock(&g_outbuf_mutex);
pthread_mutex_lock(&g_outbuf_mutex);
if (g_now_seconds > g_v4_buf.next_packet_time && g_v4_buf.fill > sizeof(g_tracker_id) + sizeof(uint32_t))
livesync_issue_peersync(&g_v4_buf);
else
pthread_mutex_unlock(&g_outbuf_mutex);
}
/* Inform live sync about whats going on. */
void livesync_tell( struct ot_workstruct *ws ) {
void livesync_tell(struct ot_workstruct *ws) {
size_t peer_size; /* initialized in next line */
ot_peer *peer_src = peer_from_peer6(&ws->peer, &peer_size);
sync_buffer *dest_buf = peer_size == OT_PEER_SIZE6 ? &g_v6_buf : &g_v4_buf;
pthread_mutex_lock(&g_outbuf_mutex);
memcpy( g_outbuf + g_outbuf_data, ws->hash, sizeof(ot_hash) );
memcpy( g_outbuf + g_outbuf_data + sizeof(ot_hash), &ws->peer, sizeof(ot_peer) );
memcpy(dest_buf->data + dest_buf->fill, ws->hash, sizeof(ot_hash));
dest_buf->fill += sizeof(ot_hash);
g_outbuf_data += sizeof(ot_hash) + sizeof(ot_peer);
memcpy(dest_buf->data + dest_buf->fill, peer_src, peer_size);
dest_buf->fill += peer_size;
if( g_outbuf_data >= LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS )
livesync_issue_peersync();
if (dest_buf->fill >= LIVESYNC_OUTGOING_BUFFSIZE_PEERS - LIVESYNC_OUTGOING_WATERMARK_PEERS)
livesync_issue_peersync(dest_buf);
else
pthread_mutex_unlock(&g_outbuf_mutex);
}
static void * livesync_worker( void * args ) {
static void *livesync_worker(void *args) {
struct ot_workstruct ws;
ot_ip6 in_ip; uint16_t in_port;
ot_ip6 in_ip;
uint16_t in_port;
(void)args;
/* Initialize our "thread local storage" */
ws.inbuf = ws.request = malloc( LIVESYNC_INCOMING_BUFFSIZE );
ws.outbuf = ws.reply = 0;
ws.inbuf = ws.request = malloc(LIVESYNC_INCOMING_BUFFSIZE);
ws.outbuf = ws.reply = 0;
memcpy( in_ip, V4mappedprefix, sizeof( V4mappedprefix ) );
memcpy(in_ip, V4mappedprefix, sizeof(V4mappedprefix));
while( 1 ) {
ws.request_size = socket_recv4(g_socket_in, (char*)ws.inbuf, LIVESYNC_INCOMING_BUFFSIZE, 12+(char*)in_ip, &in_port);
while (1) {
ws.request_size = socket_recv4(g_socket_in, (char *)ws.inbuf, LIVESYNC_INCOMING_BUFFSIZE, 12 + (char *)in_ip, &in_port);
/* Expect at least tracker id and packet type */
if( ws.request_size <= (ssize_t)(sizeof( g_tracker_id ) + sizeof( uint32_t )) )
if (ws.request_size <= (ssize_t)(sizeof(g_tracker_id) + sizeof(uint32_t)))
continue;
if( !accesslist_isblessed(in_ip, OT_PERMISSION_MAY_LIVESYNC))
if (!accesslist_is_blessed(in_ip, OT_PERMISSION_MAY_LIVESYNC))
continue;
if( !memcmp( ws.inbuf, &g_tracker_id, sizeof( g_tracker_id ) ) ) {
if (!memcmp(ws.inbuf, &g_tracker_id, sizeof(g_tracker_id))) {
/* TODO: log packet coming from ourselves */
continue;
}
switch( uint32_read_big( sizeof( g_tracker_id ) + (char *)ws.inbuf ) ) {
case OT_SYNC_PEER:
livesync_handle_peersync( &ws );
switch (uint32_read_big(sizeof(g_tracker_id) + (char *)ws.inbuf)) {
case OT_SYNC_PEER6:
livesync_handle_peersync(&ws, OT_PEER_SIZE6);
break;
case OT_SYNC_PEER4:
livesync_handle_peersync(&ws, OT_PEER_SIZE4);
break;
default:
break;
@ -213,4 +237,3 @@ static void * livesync_worker( void * args ) {
}
#endif
const char *g_version_livesync_c = "$Source$: $Revision$\n";

View File

@ -28,13 +28,19 @@
Each tracker instance accumulates announce requests until its buffer is
full or a timeout is reached. Then it broadcasts its live sync packer:
packet type SYNC_LIVE
packet type SYNC_LIVE4
[ 0x0008 0x14 info_hash
0x001c 0x04 peer's ipv4 address
0x0020 0x02 peer's port
0x0024 0x02 peer flags v1 ( SEEDING = 0x80, COMPLETE = 0x40, STOPPED = 0x20 )
]*
packet type SYNC_LIVE6
[ 0x0008 0x14 info_hash
0x001c 0x10 peer's ipv6 address
0x002c 0x02 peer's port
0x002e 0x02 peer flags v1 ( SEEDING = 0x80, COMPLETE = 0x40, STOPPED = 0x20 )
]*
*/
#ifdef WANT_SYNC_LIVE
@ -45,18 +51,18 @@ void livesync_init();
void livesync_deinit();
/* Join multicast group for listening and create sending socket */
void livesync_bind_mcast( char *ip, uint16_t port );
void livesync_bind_mcast(char *ip, uint16_t port);
/* Inform live sync about whats going on. */
void livesync_tell( struct ot_workstruct *ws );
void livesync_tell(struct ot_workstruct *ws);
/* Tickle the live sync module from time to time, so no events get
stuck when there's not enough traffic to fill udp packets fast
enough */
void livesync_ticker( );
void livesync_ticker();
/* Handle an incoming live sync packet */
void handle_livesync( const int64 sock );
void handle_livesync(const int64 sock);
#else

View File

@ -16,42 +16,39 @@
#include "uint32.h"
/* Opentracker */
#include "trackerlogic.h"
#include "ot_iovec.h"
#include "ot_mutex.h"
#include "ot_stats.h"
#include "trackerlogic.h"
/* #define MTX_DBG( STRING ) fprintf( stderr, STRING ) */
#define MTX_DBG( STRING )
#define MTX_DBG(STRING)
/* Our global all torrents list */
static ot_vector all_torrents[OT_BUCKET_COUNT];
static ot_vector all_torrents[OT_BUCKET_COUNT];
static pthread_mutex_t bucket_mutex[OT_BUCKET_COUNT];
static size_t g_torrent_count;
static size_t g_torrent_count;
/* Self pipe from opentracker.c */
extern int g_self_pipe[2];
extern int g_self_pipe[2];
ot_vector *mutex_bucket_lock( int bucket ) {
pthread_mutex_lock(bucket_mutex + bucket );
ot_vector *mutex_bucket_lock(int bucket) {
pthread_mutex_lock(bucket_mutex + bucket);
return all_torrents + bucket;
}
ot_vector *mutex_bucket_lock_by_hash( ot_hash hash ) {
return mutex_bucket_lock( uint32_read_big( (char*)hash ) >> OT_BUCKET_COUNT_SHIFT );
}
ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash) { return mutex_bucket_lock(uint32_read_big((const char *)hash) >> OT_BUCKET_COUNT_SHIFT); }
void mutex_bucket_unlock( int bucket, int delta_torrentcount ) {
void mutex_bucket_unlock(int bucket, int delta_torrentcount) {
pthread_mutex_unlock(bucket_mutex + bucket);
g_torrent_count += delta_torrentcount;
}
void mutex_bucket_unlock_by_hash( ot_hash hash, int delta_torrentcount ) {
mutex_bucket_unlock( uint32_read_big( (char*)hash ) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount );
void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount) {
mutex_bucket_unlock(uint32_read_big((char *)hash) >> OT_BUCKET_COUNT_SHIFT, delta_torrentcount);
}
size_t mutex_get_torrent_count( ) {
return g_torrent_count;
}
size_t mutex_get_torrent_count() { return g_torrent_count; }
/* TaskQueue Magic */
@ -64,16 +61,16 @@ struct ot_task {
struct ot_task *next;
};
static ot_taskid next_free_taskid = 1;
static ot_taskid next_free_taskid = 1;
static struct ot_task *tasklist;
static pthread_mutex_t tasklist_mutex;
static pthread_cond_t tasklist_being_filled;
static pthread_cond_t tasklist_being_filled;
int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) {
struct ot_task ** tmptask, * task;
int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype) {
struct ot_task **tmptask, *task;
task = malloc(sizeof( struct ot_task));
if( !task )
task = malloc(sizeof(struct ot_task));
if (!task)
return -1;
task->taskid = 0;
@ -84,98 +81,98 @@ int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype ) {
task->next = 0;
/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);
/* Skip to end of list */
tmptask = &tasklist;
while( *tmptask )
while (*tmptask)
tmptask = &(*tmptask)->next;
*tmptask = task;
/* Inform waiting workers and release lock */
pthread_cond_broadcast( &tasklist_being_filled );
pthread_mutex_unlock( &tasklist_mutex );
pthread_cond_broadcast(&tasklist_being_filled);
pthread_mutex_unlock(&tasklist_mutex);
return 0;
}
void mutex_workqueue_canceltask( int64 sock ) {
struct ot_task ** task;
void mutex_workqueue_canceltask(int64 sock) {
struct ot_task **task;
/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);
for (task = &tasklist; *task; task = &((*task)->next))
if ((*task)->sock == sock) {
struct iovec *iovec = (*task)->iovec;
struct iovec *iovec = (*task)->iovec;
struct ot_task *ptask = *task;
int i;
int i;
/* Free task's iovec */
for( i=0; i<(*task)->iovec_entries; ++i )
free( iovec[i].iov_base );
for (i = 0; i < (*task)->iovec_entries; ++i)
free(iovec[i].iov_base);
*task = (*task)->next;
free( ptask );
free(ptask);
break;
}
/* Release lock */
pthread_mutex_unlock( &tasklist_mutex );
pthread_mutex_unlock(&tasklist_mutex);
}
ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype ) {
struct ot_task * task;
ot_taskid taskid = 0;
ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype) {
struct ot_task *task;
ot_taskid taskid = 0;
/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);
while( !taskid ) {
while (!taskid) {
/* Skip to the first unassigned task this worker wants to do */
for (task = tasklist; task; task = task->next)
if (!task->taskid && ( TASK_CLASS_MASK & task->tasktype ) == *tasktype) {
if (!task->taskid && (TASK_CLASS_MASK & task->tasktype) == *tasktype) {
/* If we found an outstanding task, assign a taskid to it
and leave the loop */
task->taskid = taskid = ++next_free_taskid;
*tasktype = task->tasktype;
*tasktype = task->tasktype;
break;
}
/* Wait until the next task is being fed */
if (!taskid)
pthread_cond_wait( &tasklist_being_filled, &tasklist_mutex );
pthread_cond_wait(&tasklist_being_filled, &tasklist_mutex);
}
/* Release lock */
pthread_mutex_unlock( &tasklist_mutex );
pthread_mutex_unlock(&tasklist_mutex);
return taskid;
}
void mutex_workqueue_pushsuccess( ot_taskid taskid ) {
struct ot_task ** task;
void mutex_workqueue_pushsuccess(ot_taskid taskid) {
struct ot_task **task;
/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);
for (task = &tasklist; *task; task = &((*task)->next))
if ((*task)->taskid == taskid) {
struct ot_task *ptask = *task;
*task = (*task)->next;
free( ptask );
free(ptask);
break;
}
/* Release lock */
pthread_mutex_unlock( &tasklist_mutex );
pthread_mutex_unlock(&tasklist_mutex);
}
int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovec ) {
struct ot_task * task;
const char byte = 'o';
int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovec) {
struct ot_task *task;
const char byte = 'o';
/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);
for (task = tasklist; task; task = task->next)
if (task->taskid == taskid) {
@ -186,55 +183,90 @@ int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iove
}
/* Release lock */
pthread_mutex_unlock( &tasklist_mutex );
pthread_mutex_unlock(&tasklist_mutex);
io_trywrite( g_self_pipe[1], &byte, 1 );
io_trywrite(g_self_pipe[1], &byte, 1);
/* Indicate whether the worker has to throw away results */
return task ? 0 : -1;
}
int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovec ) {
struct ot_task ** task;
int64 sock = -1;
int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec) {
struct ot_task *task;
const char byte = 'o';
/* Want exclusive access to tasklist */
pthread_mutex_lock( &tasklist_mutex );
pthread_mutex_lock(&tasklist_mutex);
for (task = &tasklist; *task; task = &((*task)->next))
if ((*task)->tasktype == TASK_DONE) {
struct ot_task *ptask = *task;
*iovec_entries = (*task)->iovec_entries;
*iovec = (*task)->iovec;
sock = (*task)->sock;
*task = (*task)->next;
free( ptask );
for (task = tasklist; task; task = task->next)
if (task->taskid == taskid) {
if (iovec) {
if (iovec_append(&task->iovec_entries, &task->iovec, iovec))
task->tasktype = TASK_DONE_PARTIAL;
else
task = NULL;
} else
task->tasktype = TASK_DONE;
break;
}
/* Release lock */
pthread_mutex_unlock( &tasklist_mutex );
pthread_mutex_unlock(&tasklist_mutex);
io_trywrite(g_self_pipe[1], &byte, 1);
/* Indicate whether the worker has to throw away results */
return task ? 0 : -1;
}
int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovec, int *is_partial) {
struct ot_task **task;
int64 sock = -1;
*is_partial = 0;
/* Want exclusive access to tasklist */
pthread_mutex_lock(&tasklist_mutex);
for (task = &tasklist; *task; task = &((*task)->next))
if (((*task)->tasktype & TASK_CLASS_MASK) == TASK_DONE) {
struct ot_task *ptask = *task;
*iovec_entries = ptask->iovec_entries;
*iovec = ptask->iovec;
sock = ptask->sock;
if ((*task)->tasktype == TASK_DONE) {
*task = ptask->next;
free(ptask);
} else {
ptask->iovec_entries = 0;
ptask->iovec = NULL;
*is_partial = 1;
/* Prevent task from showing up immediately again unless new data was added */
(*task)->tasktype = TASK_FULLSCRAPE;
}
break;
}
/* Release lock */
pthread_mutex_unlock(&tasklist_mutex);
return sock;
}
void mutex_init( ) {
void mutex_init() {
int i;
pthread_mutex_init(&tasklist_mutex, NULL);
pthread_cond_init (&tasklist_being_filled, NULL);
for (i=0; i < OT_BUCKET_COUNT; ++i)
pthread_mutex_init(bucket_mutex + i, NULL);
byte_zero( all_torrents, sizeof( all_torrents ) );
pthread_cond_init(&tasklist_being_filled, NULL);
for (i = 0; i < OT_BUCKET_COUNT; ++i)
pthread_mutex_init(bucket_mutex + i, NULL);
byte_zero(all_torrents, sizeof(all_torrents));
}
void mutex_deinit( ) {
void mutex_deinit() {
int i;
for (i=0; i < OT_BUCKET_COUNT; ++i)
pthread_mutex_destroy(bucket_mutex + i);
for (i = 0; i < OT_BUCKET_COUNT; ++i)
pthread_mutex_destroy(bucket_mutex + i);
pthread_mutex_destroy(&tasklist_mutex);
pthread_cond_destroy(&tasklist_being_filled);
byte_zero( all_torrents, sizeof( all_torrents ) );
byte_zero(all_torrents, sizeof(all_torrents));
}
const char *g_version_mutex_c = "$Source$: $Revision$\n";

View File

@ -7,69 +7,74 @@
#define OT_MUTEX_H__
#include <sys/uio.h>
#include "trackerlogic.h"
void mutex_init( void );
void mutex_deinit( void );
void mutex_init(void);
void mutex_deinit(void);
ot_vector *mutex_bucket_lock( int bucket );
ot_vector *mutex_bucket_lock_by_hash( ot_hash hash );
ot_vector *mutex_bucket_lock(int bucket);
ot_vector *mutex_bucket_lock_by_hash(ot_hash const hash);
void mutex_bucket_unlock( int bucket, int delta_torrentcount );
void mutex_bucket_unlock_by_hash( ot_hash hash, int delta_torrentcount );
void mutex_bucket_unlock(int bucket, int delta_torrentcount);
void mutex_bucket_unlock_by_hash(ot_hash const hash, int delta_torrentcount);
size_t mutex_get_torrent_count(void);
size_t mutex_get_torrent_count(void);
typedef enum {
TASK_STATS_CONNS = 0x0001,
TASK_STATS_TCP = 0x0002,
TASK_STATS_UDP = 0x0003,
TASK_STATS_SCRAPE = 0x0004,
TASK_STATS_FULLSCRAPE = 0x0005,
TASK_STATS_TPB = 0x0006,
TASK_STATS_HTTPERRORS = 0x0007,
TASK_STATS_VERSION = 0x0008,
TASK_STATS_BUSY_NETWORKS = 0x0009,
TASK_STATS_RENEW = 0x000a,
TASK_STATS_SYNCS = 0x000b,
TASK_STATS_COMPLETED = 0x000c,
TASK_STATS_NUMWANTS = 0x000d,
TASK_STATS_CONNS = 0x0001,
TASK_STATS_TCP = 0x0002,
TASK_STATS_UDP = 0x0003,
TASK_STATS_SCRAPE = 0x0004,
TASK_STATS_FULLSCRAPE = 0x0005,
TASK_STATS_TPB = 0x0006,
TASK_STATS_HTTPERRORS = 0x0007,
TASK_STATS_VERSION = 0x0008,
TASK_STATS_BUSY_NETWORKS = 0x0009,
TASK_STATS_RENEW = 0x000a,
TASK_STATS_SYNCS = 0x000b,
TASK_STATS_COMPLETED = 0x000c,
TASK_STATS_NUMWANTS = 0x000d,
TASK_STATS = 0x0100, /* Mask */
TASK_STATS_TORRENTS = 0x0101,
TASK_STATS_PEERS = 0x0102,
TASK_STATS_SLASH24S = 0x0103,
TASK_STATS_TOP10 = 0x0104,
TASK_STATS_TOP100 = 0x0105,
TASK_STATS_EVERYTHING = 0x0106,
TASK_STATS_FULLLOG = 0x0107,
TASK_STATS_WOODPECKERS = 0x0108,
TASK_FULLSCRAPE = 0x0200, /* Default mode */
TASK_FULLSCRAPE_TPB_BINARY = 0x0201,
TASK_FULLSCRAPE_TPB_ASCII = 0x0202,
TASK_FULLSCRAPE_TPB_ASCII_PLUS = 0x0203,
TASK_FULLSCRAPE_TPB_URLENCODED = 0x0204,
TASK_FULLSCRAPE_TRACKERSTATE = 0x0205,
TASK_STATS = 0x0100, /* Mask */
TASK_STATS_TORRENTS = 0x0101,
TASK_STATS_PEERS = 0x0102,
TASK_STATS_SLASH24S = 0x0103,
TASK_STATS_TOP10 = 0x0104,
TASK_STATS_TOP100 = 0x0105,
TASK_STATS_EVERYTHING = 0x0106,
TASK_STATS_FULLLOG = 0x0107,
TASK_STATS_WOODPECKERS = 0x0108,
TASK_DMEM = 0x0300,
TASK_FULLSCRAPE = 0x0200, /* Default mode */
TASK_FULLSCRAPE_TPB_BINARY = 0x0201,
TASK_FULLSCRAPE_TPB_ASCII = 0x0202,
TASK_FULLSCRAPE_TPB_ASCII_PLUS = 0x0203,
TASK_FULLSCRAPE_TPB_URLENCODED = 0x0204,
TASK_FULLSCRAPE_TRACKERSTATE = 0x0205,
TASK_DONE = 0x0f00,
TASK_DMEM = 0x0300,
TASK_FLAG_GZIP = 0x1000,
TASK_FLAG_BZIP2 = 0x2000,
TASK_DONE = 0x0f00,
TASK_DONE_PARTIAL = 0x0f01,
TASK_TASK_MASK = 0x0fff,
TASK_CLASS_MASK = 0x0f00,
TASK_FLAGS_MASK = 0xf000
TASK_FLAG_GZIP = 0x1000,
TASK_FLAG_BZIP2 = 0x2000,
TASK_FLAG_ZSTD = 0x4000,
TASK_FLAG_CHUNKED = 0x8000,
TASK_TASK_MASK = 0x0fff,
TASK_CLASS_MASK = 0x0f00,
TASK_FLAGS_MASK = 0xf000
} ot_tasktype;
typedef unsigned long ot_taskid;
int mutex_workqueue_pushtask( int64 sock, ot_tasktype tasktype );
void mutex_workqueue_canceltask( int64 sock );
void mutex_workqueue_pushsuccess( ot_taskid taskid );
ot_taskid mutex_workqueue_poptask( ot_tasktype *tasktype );
int mutex_workqueue_pushresult( ot_taskid taskid, int iovec_entries, struct iovec *iovector );
int64 mutex_workqueue_popresult( int *iovec_entries, struct iovec ** iovector );
int mutex_workqueue_pushtask(int64 sock, ot_tasktype tasktype);
void mutex_workqueue_canceltask(int64 sock);
void mutex_workqueue_pushsuccess(ot_taskid taskid);
ot_taskid mutex_workqueue_poptask(ot_tasktype *tasktype);
int mutex_workqueue_pushresult(ot_taskid taskid, int iovec_entries, struct iovec *iovector);
int mutex_workqueue_pushchunked(ot_taskid taskid, struct iovec *iovec);
int64 mutex_workqueue_popresult(int *iovec_entries, struct iovec **iovector, int *is_partial);
#endif

View File

@ -486,5 +486,3 @@ void rijndaelEncrypt128(const uint32_t rk[44], const uint8_t pt[16], uint8_t ct[
rk[43];
PUTU32(ct + 12, s3);
}
const char *g_version_rijndael_c = "$Source$: $Revision$\n";

File diff suppressed because it is too large Load Diff

View File

@ -6,10 +6,12 @@
#ifndef OT_STATS_H__
#define OT_STATS_H__
#include "trackerlogic.h"
typedef enum {
EVENT_ACCEPT,
EVENT_READ,
EVENT_CONNECT, /* UDP only */
EVENT_CONNECT, /* UDP only */
EVENT_ANNOUNCE,
EVENT_COMPLETED,
EVENT_RENEW,
@ -17,7 +19,8 @@ typedef enum {
EVENT_SCRAPE,
EVENT_FULLSCRAPE_REQUEST,
EVENT_FULLSCRAPE_REQUEST_GZIP,
EVENT_FULLSCRAPE, /* TCP only */
EVENT_FULLSCRAPE_REQUEST_ZSTD,
EVENT_FULLSCRAPE, /* TCP only */
EVENT_FAILED,
EVENT_BUCKET_LOCKED,
EVENT_WOODPECKER,
@ -38,15 +41,12 @@ enum {
CODE_HTTPERROR_COUNT
};
void stats_issue_event( ot_status_event event, PROTO_FLAG proto, uintptr_t event_data );
void stats_deliver( int64 sock, int tasktype );
void stats_cleanup( void );
size_t return_stats_for_tracker( char *reply, int mode, int format );
size_t stats_return_tracker_version( char *reply );
void stats_init( void );
void stats_deinit( void );
extern const char *g_version_rijndael_c;
extern const char *g_version_livesync_c;
void stats_issue_event(ot_status_event event, PROTO_FLAG proto, uintptr_t event_data);
void stats_deliver(int64 sock, int tasktype);
void stats_cleanup(void);
size_t return_stats_for_tracker(char *reply, int mode, int format);
size_t stats_return_tracker_version(char *reply);
void stats_init(void);
void stats_deinit(void);
#endif

120
ot_sync.c
View File

@ -4,64 +4,66 @@
$id$ */
/* System */
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/uio.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>
#include <pthread.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/uio.h>
/* Libowfat */
#include "scan.h"
#include "byte.h"
#include "io.h"
#include "scan.h"
/* Opentracker */
#include "trackerlogic.h"
#include "ot_mutex.h"
#include "ot_sync.h"
#include "ot_stats.h"
#include "ot_iovec.h"
#include "ot_mutex.h"
#include "ot_stats.h"
#include "ot_sync.h"
#include "trackerlogic.h"
#ifdef WANT_SYNC_BATCH
#define OT_SYNC_CHUNK_SIZE (512*1024)
#define OT_SYNC_CHUNK_SIZE (512 * 1024)
/* Import Changeset from an external authority
format: d4:syncd[..]ee
[..]: ( 20:01234567890abcdefghij16:XXXXYYYY )+
*/
int add_changeset_to_tracker( uint8_t *data, size_t len ) {
ot_hash *hash;
uint8_t *end = data + len;
unsigned long peer_count;
int add_changeset_to_tracker(uint8_t *data, size_t len) {
ot_hash *hash;
uint8_t *end = data + len;
unsigned long peer_count;
/* We do know, that the string is \n terminated, so it cant
overflow */
if( byte_diff( data, 8, "d4:syncd" ) ) return -1;
if (byte_diff(data, 8, "d4:syncd"))
return -1;
data += 8;
while( 1 ) {
if( byte_diff( data, 3, "20:" ) ) {
if( byte_diff( data, 2, "ee" ) )
while (1) {
if (byte_diff(data, 3, "20:")) {
if (byte_diff(data, 2, "ee"))
return -1;
return 0;
}
data += 3;
hash = (ot_hash*)data;
data += sizeof( ot_hash );
hash = (ot_hash *)data;
data += sizeof(ot_hash);
/* Scan string length indicator */
data += ( len = scan_ulong( (char*)data, &peer_count ) );
data += (len = scan_ulong((char *)data, &peer_count));
/* If no long was scanned, it is not divisible by 8, it is not
followed by a colon or claims to need to much memory, we fail */
if( !len || !peer_count || ( peer_count & 7 ) || ( *data++ != ':' ) || ( data + peer_count > end ) )
if (!len || !peer_count || (peer_count & 7) || (*data++ != ':') || (data + peer_count > end))
return -1;
while( peer_count > 0 ) {
add_peer_to_torrent( hash, (ot_peer*)data, 1 );
data += 8; peer_count -= 8;
while (peer_count > 0) {
add_peer_to_torrent(hash, (ot_peer *)data, 1);
data += 8;
peer_count -= 8;
}
}
return 0;
@ -70,80 +72,86 @@ int add_changeset_to_tracker( uint8_t *data, size_t len ) {
/* Proposed output format
d4:syncd20:<info_hash>8*N:(xxxxyyyy)*Nee
*/
static void sync_make( int *iovec_entries, struct iovec **iovector ) {
int bucket;
char *r, *re;
static void sync_make(int *iovec_entries, struct iovec **iovector) {
int bucket;
char *r, *re;
/* Setup return vector... */
*iovec_entries = 0;
*iovector = NULL;
if( !( r = iovec_increase( iovec_entries, iovector, OT_SYNC_CHUNK_SIZE ) ) )
*iovector = NULL;
if (!(r = iovec_increase(iovec_entries, iovector, OT_SYNC_CHUNK_SIZE)))
return;
/* ... and pointer to end of current output buffer.
This works as a low watermark */
re = r + OT_SYNC_CHUNK_SIZE;
memmove( r, "d4:syncd", 8 ); r += 8;
memmove(r, "d4:syncd", 8);
r += 8;
/* For each bucket... */
for( bucket=0; bucket<OT_BUCKET_COUNT; ++bucket ) {
for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
/* Get exclusive access to that bucket */
ot_vector *torrents_list = mutex_bucket_lock( bucket );
size_t tor_offset;
ot_vector *torrents_list = mutex_bucket_lock(bucket);
size_t tor_offset;
/* For each torrent in this bucket.. */
for( tor_offset=0; tor_offset<torrents_list->size; ++tor_offset ) {
for (tor_offset = 0; tor_offset < torrents_list->size; ++tor_offset) {
/* Address torrents members */
ot_peerlist *peer_list = ( ((ot_torrent*)(torrents_list->data))[tor_offset] ).peer_list;
ot_hash *hash =&( ((ot_torrent*)(torrents_list->data))[tor_offset] ).hash;
ot_peerlist *peer_list = (((ot_torrent *)(torrents_list->data))[tor_offset]).peer_list;
ot_hash *hash = &(((ot_torrent *)(torrents_list->data))[tor_offset]).hash;
const size_t byte_count = sizeof(ot_peer) * peer_list->changeset.size;
/* If we reached our low watermark in buffer... */
if( re - r <= (ssize_t)(/* strlen( "20:" ) == */ 3 + sizeof( ot_hash ) + /* strlen_max( "%zd" ) == */ 12 + byte_count ) ) {
if (re - r <= (ssize_t)(/* strlen( "20:" ) == */ 3 + sizeof(ot_hash) + /* strlen_max( "%zd" ) == */ 12 + byte_count)) {
/* Allocate a fresh output buffer at the end of our buffers list
release bucket and return, if that fails */
if( !( r = iovec_fix_increase_or_free( iovec_entries, iovector, r, OT_SYNC_CHUNK_SIZE ) ) )
return mutex_bucket_unlock( bucket );
if (!(r = iovec_fix_increase_or_free(iovec_entries, iovector, r, OT_SYNC_CHUNK_SIZE)))
return mutex_bucket_unlock(bucket);
/* Adjust new end of output buffer */
re = r + OT_SYNC_CHUNK_SIZE;
}
*r++ = '2'; *r++ = '0'; *r++ = ':';
memmove( r, hash, sizeof( ot_hash ) ); r += sizeof( ot_hash );
r += sprintf( r, "%zd:", byte_count );
memmove( r, peer_list->changeset.data, byte_count ); r += byte_count;
*r++ = '2';
*r++ = '0';
*r++ = ':';
memmove(r, hash, sizeof(ot_hash));
r += sizeof(ot_hash);
r += sprintf(r, "%zd:", byte_count);
memmove(r, peer_list->changeset.data, byte_count);
r += byte_count;
}
/* All torrents done: release lock on currenct bucket */
mutex_bucket_unlock( bucket );
mutex_bucket_unlock(bucket);
}
/* Close bencoded sync dictionary */
*r++='e'; *r++='e';
*r++ = 'e';
*r++ = 'e';
/* Release unused memory in current output buffer */
iovec_fixlast( iovec_entries, iovector, r );
iovec_fixlast(iovec_entries, iovector, r);
}
/* This is the entry point into this worker thread
It grabs tasks from mutex_tasklist and delivers results back
*/
static void * sync_worker( void * args) {
int iovec_entries;
static void *sync_worker(void *args) {
int iovec_entries;
struct iovec *iovector;
args = args;
while( 1 ) {
while (1) {
ot_tasktype tasktype = TASK_SYNC_OUT;
ot_taskid taskid = mutex_workqueue_poptask( &tasktype );
sync_make( &iovec_entries, &iovector );
stats_issue_event( EVENT_SYNC_OUT, FLAG_TCP, iovec_length( &iovec_entries, &iovector) );
if( mutex_workqueue_pushresult( taskid, iovec_entries, iovector ) )
iovec_free( &iovec_entries, &iovector );
ot_taskid taskid = mutex_workqueue_poptask(&tasktype);
sync_make(&iovec_entries, &iovector);
stats_issue_event(EVENT_SYNC_OUT, FLAG_TCP, iovec_length(&iovec_entries, &iovector));
if (mutex_workqueue_pushresult(taskid, iovec_entries, iovector))
iovec_free(&iovec_entries, &iovector);
}
return NULL;
}
@ -162,5 +170,3 @@ void sync_deliver( int64 socket ) {
}
#endif
const char *g_version_sync_c = "$Source$: $Revision$\n";

View File

@ -9,11 +9,11 @@
#ifdef WANT_SYNC_BATCH
enum { SYNC_IN, SYNC_OUT };
void sync_init( );
void sync_deinit( );
void sync_deliver( int64 socket );
void sync_init();
void sync_deinit();
void sync_deliver(int64 socket);
int add_changeset_to_tracker( uint8_t *data, size_t len );
int add_changeset_to_tracker(uint8_t *data, size_t len);
#else
#define sync_init()

265
ot_udp.c
View File

@ -4,30 +4,31 @@
$id$ */
/* System */
#include <stdlib.h>
#include <pthread.h>
#include <string.h>
#include <arpa/inet.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
/* Libowfat */
#include "socket.h"
#include "io.h"
#include "ip6.h"
#include "socket.h"
/* Opentracker */
#include "trackerlogic.h"
#include "ot_udp.h"
#include "ot_stats.h"
#include "ot_rijndael.h"
#include "ot_stats.h"
#include "ot_udp.h"
#include "trackerlogic.h"
#if 0
static const uint8_t g_static_connid[8] = { 0x23, 0x42, 0x05, 0x17, 0xde, 0x41, 0x50, 0xff };
#endif
static uint32_t g_rijndael_round_key[44] = {0};
static uint32_t g_key_of_the_hour[2] = {0};
static uint32_t g_key_of_the_hour[2] = {0};
static ot_time g_hour_of_the_key;
static void udp_generate_rijndael_round_key() {
static void udp_generate_rijndael_round_key() {
uint32_t key[16];
#ifdef WANT_ARC4RANDOM
arc4random_buf(&key[0], sizeof(key));
@ -37,7 +38,7 @@ static void udp_generate_rijndael_round_key() {
key[2] = random();
key[3] = random();
#endif
rijndaelKeySetupEnc128( g_rijndael_round_key, (uint8_t*)key );
rijndaelKeySetupEnc128(g_rijndael_round_key, (uint8_t *)key);
#ifdef WANT_ARC4RANDOM
g_key_of_the_hour[0] = arc4random();
@ -48,180 +49,188 @@ static void udp_generate_rijndael_round_key() {
}
/* Generate current and previous connection id for ip */
static void udp_make_connectionid( uint32_t connid[2], const ot_ip6 remoteip, int age ) {
static void udp_make_connectionid(uint32_t connid[2], const ot_ip6 remoteip, int age) {
uint32_t plain[4], crypt[4];
int i;
if( g_now_minutes + 60 > g_hour_of_the_key ) {
g_hour_of_the_key = g_now_minutes;
int i;
if (g_now_minutes + 60 > g_hour_of_the_key) {
g_hour_of_the_key = g_now_minutes;
g_key_of_the_hour[1] = g_key_of_the_hour[0];
#ifdef WANT_ARC4RANDOM
g_key_of_the_hour[0] = arc4random();
g_key_of_the_hour[0] = arc4random();
#else
g_key_of_the_hour[0] = random();
g_key_of_the_hour[0] = random();
#endif
}
memcpy( plain, remoteip, sizeof( plain ) );
for( i=0; i<4; ++i ) plain[i] ^= g_key_of_the_hour[age];
rijndaelEncrypt128( g_rijndael_round_key, (uint8_t*)remoteip, (uint8_t*)crypt );
memcpy(plain, remoteip, sizeof(plain));
for (i = 0; i < 4; ++i)
plain[i] ^= g_key_of_the_hour[age];
rijndaelEncrypt128(g_rijndael_round_key, (uint8_t *)remoteip, (uint8_t *)crypt);
connid[0] = crypt[0] ^ crypt[1];
connid[1] = crypt[2] ^ crypt[3];
}
/* UDP implementation according to http://xbtt.sourceforge.net/udp_tracker_protocol.html */
int handle_udp6( int64 serversocket, struct ot_workstruct *ws ) {
ot_ip6 remoteip;
uint32_t *inpacket = (uint32_t*)ws->inbuf;
uint32_t *outpacket = (uint32_t*)ws->outbuf;
uint32_t numwant, left, event, scopeid;
uint32_t connid[2];
uint32_t action;
uint16_t port, remoteport;
size_t byte_count, scrape_count;
int handle_udp6(int64 serversocket, struct ot_workstruct *ws) {
ot_ip6 remoteip;
uint32_t *inpacket = (uint32_t *)ws->inbuf;
uint32_t *outpacket = (uint32_t *)ws->outbuf;
uint32_t left, event, scopeid;
uint32_t connid[2];
uint32_t action;
uint16_t port, remoteport;
size_t byte_count, scrape_count;
byte_count = socket_recv6( serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid );
if( !byte_count ) return 0;
byte_count = socket_recv6(serversocket, ws->inbuf, G_INBUF_SIZE, remoteip, &remoteport, &scopeid);
if (!byte_count)
return 0;
stats_issue_event( EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip );
stats_issue_event( EVENT_READ, FLAG_UDP, byte_count );
stats_issue_event(EVENT_ACCEPT, FLAG_UDP, (uintptr_t)remoteip);
stats_issue_event(EVENT_READ, FLAG_UDP, byte_count);
/* Minimum udp tracker packet size, also catches error */
if( byte_count < 16 )
if (byte_count < 16)
return 1;
/* Get action to take. Ignore error messages and broken packets */
action = ntohl( inpacket[2] );
if( action > 2 )
action = ntohl(inpacket[2]);
if (action > 2)
return 1;
/* Generate the connection id we give out and expect to and from
the requesting ip address, this prevents udp spoofing */
udp_make_connectionid( connid, remoteip, 0 );
udp_make_connectionid(connid, remoteip, 0);
/* Initialise hash pointer */
ws->hash = NULL;
ws->hash = NULL;
ws->peer_id = NULL;
/* If action is not 0 (connect), then we expect the derived
connection id in first 64 bit */
if( ( action > 0 ) && ( inpacket[0] != connid[0] || inpacket[1] != connid[1] ) ) {
if ((action > 0) && (inpacket[0] != connid[0] || inpacket[1] != connid[1])) {
/* If connection id does not match, try the one that was
valid in the previous hour. Only if this also does not
match, return an error packet */
udp_make_connectionid( connid, remoteip, 1 );
if( inpacket[0] != connid[0] || inpacket[1] != connid[1] ) {
const size_t s = sizeof( "Connection ID missmatch." );
outpacket[0] = htonl( 3 ); outpacket[1] = inpacket[3];
memcpy( &outpacket[2], "Connection ID missmatch.", s );
socket_send6( serversocket, ws->outbuf, 8 + s, remoteip, remoteport, 0 );
stats_issue_event( EVENT_CONNID_MISSMATCH, FLAG_UDP, 8 + s );
udp_make_connectionid(connid, remoteip, 1);
if (inpacket[0] != connid[0] || inpacket[1] != connid[1]) {
const size_t s = sizeof("Connection ID missmatch.");
outpacket[0] = htonl(3);
outpacket[1] = inpacket[3];
memcpy(&outpacket[2], "Connection ID missmatch.", s);
socket_send6(serversocket, ws->outbuf, 8 + s, remoteip, remoteport, 0);
stats_issue_event(EVENT_CONNID_MISSMATCH, FLAG_UDP, 8 + s);
return 1;
}
}
switch( action ) {
case 0: /* This is a connect action */
/* look for udp bittorrent magic id */
if( (ntohl(inpacket[0]) != 0x00000417) || (ntohl(inpacket[1]) != 0x27101980) )
return 1;
switch (action) {
case 0: /* This is a connect action */
/* look for udp bittorrent magic id */
if ((ntohl(inpacket[0]) != 0x00000417) || (ntohl(inpacket[1]) != 0x27101980))
return 1;
outpacket[0] = 0;
outpacket[1] = inpacket[3];
outpacket[2] = connid[0];
outpacket[3] = connid[1];
outpacket[0] = 0;
outpacket[1] = inpacket[3];
outpacket[2] = connid[0];
outpacket[3] = connid[1];
socket_send6( serversocket, ws->outbuf, 16, remoteip, remoteport, 0 );
stats_issue_event( EVENT_CONNECT, FLAG_UDP, 16 );
socket_send6(serversocket, ws->outbuf, 16, remoteip, remoteport, 0);
stats_issue_event(EVENT_CONNECT, FLAG_UDP, 16);
break;
case 1: /* This is an announce action */
/* Minimum udp announce packet size */
if (byte_count < 98)
return 1;
/* We do only want to know, if it is zero */
left = inpacket[64 / 4] | inpacket[68 / 4];
event = ntohl(inpacket[80 / 4]);
port = *(uint16_t *)(((char *)inpacket) + 96);
ws->hash = (ot_hash *)(((char *)inpacket) + 16);
OT_SETIP(ws->peer, remoteip);
OT_SETPORT(ws->peer, &port);
OT_PEERFLAG(ws->peer) = 0;
switch (event) {
case 1:
OT_PEERFLAG(ws->peer) |= PEER_FLAG_COMPLETED;
break;
case 1: /* This is an announce action */
/* Minimum udp announce packet size */
if( byte_count < 98 )
return 1;
/* We do only want to know, if it is zero */
left = inpacket[64/4] | inpacket[68/4];
/* Limit amount of peers to 200 */
numwant = ntohl( inpacket[92/4] );
if (numwant > 200) numwant = 200;
event = ntohl( inpacket[80/4] );
port = *(uint16_t*)( ((char*)inpacket) + 96 );
ws->hash = (ot_hash*)( ((char*)inpacket) + 16 );
OT_SETIP( &ws->peer, remoteip );
OT_SETPORT( &ws->peer, &port );
OT_PEERFLAG( &ws->peer ) = 0;
switch( event ) {
case 1: OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_COMPLETED; break;
case 3: OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_STOPPED; break;
default: break;
}
if( !left )
OT_PEERFLAG( &ws->peer ) |= PEER_FLAG_SEEDING;
outpacket[0] = htonl( 1 ); /* announce action */
outpacket[1] = inpacket[12/4];
if( OT_PEERFLAG( &ws->peer ) & PEER_FLAG_STOPPED ) { /* Peer is gone. */
ws->reply = ws->outbuf;
ws->reply_size = remove_peer_from_torrent( FLAG_UDP, ws );
} else {
ws->reply = ws->outbuf + 8;
ws->reply_size = 8 + add_peer_to_torrent_and_return_peers( FLAG_UDP, ws, numwant );
}
socket_send6( serversocket, ws->outbuf, ws->reply_size, remoteip, remoteport, 0 );
stats_issue_event( EVENT_ANNOUNCE, FLAG_UDP, ws->reply_size );
case 3:
OT_PEERFLAG(ws->peer) |= PEER_FLAG_STOPPED;
break;
case 2: /* This is a scrape action */
outpacket[0] = htonl( 2 ); /* scrape action */
outpacket[1] = inpacket[12/4];
for( scrape_count = 0; ( scrape_count * 20 < byte_count - 16) && ( scrape_count <= 74 ); scrape_count++ )
return_udp_scrape_for_torrent( *(ot_hash*)( ((char*)inpacket) + 16 + 20 * scrape_count ), ((char*)outpacket) + 8 + 12 * scrape_count );
socket_send6( serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0 );
stats_issue_event( EVENT_SCRAPE, FLAG_UDP, scrape_count );
default:
break;
}
if (!left)
OT_PEERFLAG(ws->peer) |= PEER_FLAG_SEEDING;
outpacket[0] = htonl(1); /* announce action */
outpacket[1] = inpacket[12 / 4];
if (OT_PEERFLAG(ws->peer) & PEER_FLAG_STOPPED) { /* Peer is gone. */
ws->reply = ws->outbuf;
ws->reply_size = remove_peer_from_torrent(FLAG_UDP, ws);
} else {
/* Limit amount of peers to OT_MAX_PEERS_UDP */
uint32_t numwant = ntohl(inpacket[92 / 4]);
size_t max_peers = ip6_isv4mapped(remoteip) ? OT_MAX_PEERS_UDP4 : OT_MAX_PEERS_UDP6;
if (numwant > max_peers)
numwant = max_peers;
ws->reply = ws->outbuf + 8;
ws->reply_size = 8 + add_peer_to_torrent_and_return_peers(FLAG_UDP, ws, numwant);
}
socket_send6(serversocket, ws->outbuf, ws->reply_size, remoteip, remoteport, 0);
stats_issue_event(EVENT_ANNOUNCE, FLAG_UDP, ws->reply_size);
break;
case 2: /* This is a scrape action */
outpacket[0] = htonl(2); /* scrape action */
outpacket[1] = inpacket[12 / 4];
for (scrape_count = 0; (scrape_count * 20 < byte_count - 16) && (scrape_count <= 74); scrape_count++)
return_udp_scrape_for_torrent(*(ot_hash *)(((char *)inpacket) + 16 + 20 * scrape_count), ((char *)outpacket) + 8 + 12 * scrape_count);
socket_send6(serversocket, ws->outbuf, 8 + 12 * scrape_count, remoteip, remoteport, 0);
stats_issue_event(EVENT_SCRAPE, FLAG_UDP, scrape_count);
break;
}
return 1;
}
static void* udp_worker( void * args ) {
int64 sock = (int64)args;
static void *udp_worker(void *args) {
int64 sock = (int64)args;
struct ot_workstruct ws;
memset( &ws, 0, sizeof(ws) );
memset(&ws, 0, sizeof(ws));
ws.inbuf=malloc(G_INBUF_SIZE);
ws.outbuf=malloc(G_OUTBUF_SIZE);
#ifdef _DEBUG_HTTPERROR
ws.debugbuf=malloc(G_DEBUGBUF_SIZE);
ws.inbuf = malloc(G_INBUF_SIZE);
ws.outbuf = malloc(G_OUTBUF_SIZE);
#ifdef _DEBUG_HTTPERROR
ws.debugbuf = malloc(G_DEBUGBUF_SIZE);
#endif
while( g_opentracker_running )
handle_udp6( sock, &ws );
while (g_opentracker_running)
handle_udp6(sock, &ws);
free( ws.inbuf );
free( ws.outbuf );
#ifdef _DEBUG_HTTPERROR
free( ws.debugbuf );
free(ws.inbuf);
free(ws.outbuf);
#ifdef _DEBUG_HTTPERROR
free(ws.debugbuf);
#endif
return NULL;
}
void udp_init( int64 sock, unsigned int worker_count ) {
void udp_init(int64 sock, unsigned int worker_count) {
pthread_t thread_id;
if( !g_rijndael_round_key[0] )
if (!g_rijndael_round_key[0])
udp_generate_rijndael_round_key();
#ifdef _DEBUG
fprintf( stderr, " installing %d workers on udp socket %ld\n", worker_count, (unsigned long)sock );
fprintf(stderr, " installing %d workers on udp socket %ld\n", worker_count, (unsigned long)sock);
#endif
while( worker_count-- )
pthread_create( &thread_id, NULL, udp_worker, (void *)sock );
while (worker_count--)
pthread_create(&thread_id, NULL, udp_worker, (void *)sock);
}
const char *g_version_udp_c = "$Source$: $Revision$\n";

View File

@ -6,7 +6,7 @@
#ifndef OT_UDP_H__
#define OT_UDP_H__
void udp_init( int64 sock, unsigned int worker_count );
int handle_udp6( int64 serversocket, struct ot_workstruct *ws );
void udp_init(int64 sock, unsigned int worker_count);
int handle_udp6(int64 serversocket, struct ot_workstruct *ws);
#endif

View File

@ -4,39 +4,37 @@
$id$ */
/* System */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <stdint.h>
/* Opentracker */
#include "trackerlogic.h"
#include "ot_vector.h"
/* Libowfat */
#include "uint32.h"
#include "uint16.h"
#include "uint32.h"
static int vector_compare_peer(const void *peer1, const void *peer2 ) {
return memcmp( peer1, peer2, OT_PEER_COMPARE_SIZE );
}
static int vector_compare_peer6(const void *peer1, const void *peer2) { return memcmp(peer1, peer2, OT_PEER_COMPARE_SIZE6); }
static int vector_compare_peer4(const void *peer1, const void *peer2) { return memcmp(peer1, peer2, OT_PEER_COMPARE_SIZE4); }
/* This function gives us a binary search that returns a pointer, even if
no exact match is found. In that case it sets exactmatch 0 and gives
calling functions the chance to insert data
*/
void *binary_search( const void * const key, const void * base, const size_t member_count, const size_t member_size,
size_t compare_size, int *exactmatch ) {
void *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch) {
size_t interval = member_count;
while( interval ) {
uint8_t *lookat = ((uint8_t*)base) + member_size * ( interval / 2 );
int cmp = memcmp( lookat, key, compare_size );
if(cmp == 0 ) {
while (interval) {
uint8_t *lookat = ((uint8_t *)base) + member_size * (interval / 2);
int cmp = memcmp(lookat, key, compare_size);
if (cmp == 0) {
base = lookat;
break;
}
if(cmp < 0) {
if (cmp < 0) {
base = lookat + member_size;
interval--;
}
@ -44,13 +42,14 @@ void *binary_search( const void * const key, const void * base, const size_t mem
}
*exactmatch = interval;
return (void*)base;
return (void *)base;
}
static uint8_t vector_hash_peer( ot_peer *peer, int bucket_count ) {
unsigned int hash = 5381, i = OT_PEER_COMPARE_SIZE;
uint8_t *p = (uint8_t*)peer;
while( i-- ) hash += (hash<<5) + *(p++);
static uint8_t vector_hash_peer(ot_peer const *peer, size_t compare_size, int bucket_count) {
unsigned int hash = 5381;
uint8_t *p = (uint8_t *)peer;
while (compare_size--)
hash += (hash << 5) + *(p++);
return hash % bucket_count;
}
@ -61,48 +60,65 @@ static uint8_t vector_hash_peer( ot_peer *peer, int bucket_count ) {
if it wasn't found in vector. Caller needs to check the passed "exactmatch" variable to see, whether an insert
took place. If resizing the vector failed, NULL is returned, else the pointer to the object in vector.
*/
void *vector_find_or_insert( ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch ) {
uint8_t *match = binary_search( key, vector->data, vector->size, member_size, compare_size, exactmatch );
void *vector_find_or_insert(ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch) {
uint8_t *match = binary_search(key, vector->data, vector->size, member_size, compare_size, exactmatch);
if( *exactmatch ) return match;
if (*exactmatch)
return match;
if (vector->size + 1 > vector->space) {
size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS;
ptrdiff_t match_off = match - (uint8_t *)vector->data;
uint8_t *new_data = realloc(vector->data, new_space * member_size);
if (!new_data)
return NULL;
if( vector->size + 1 > vector->space ) {
size_t new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS;
uint8_t *new_data = realloc( vector->data, new_space * member_size );
if( !new_data ) return NULL;
/* Adjust pointer if it moved by realloc */
match = new_data + (match - (uint8_t*)vector->data);
match = new_data + match_off;
vector->data = new_data;
vector->data = new_data;
vector->space = new_space;
}
memmove( match + member_size, match, ((uint8_t*)vector->data) + member_size * vector->size - match );
memmove(match + member_size, match, ((uint8_t *)vector->data) + member_size * vector->size - match);
vector->size++;
return match;
}
/* Peer-specialised find-or-insert: peers are stored as raw byte records of
   peer_size bytes (IPv4 or IPv6 flavour), compared on their address prefix.
   If the vector is really a bucket list (space < size), descends into the
   bucket selected by hashing the peer first.
   Returns the slot for the peer, or NULL if growing the vector failed. */
ot_peer *vector_find_or_insert_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch) {
  ot_peer     *match, *end;
  const size_t compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
  size_t       match_to_end;

  /* If space is zero but size is set, we're dealing with a list of vector->size buckets */
  if (vector->space < vector->size)
    vector = ((ot_vector *)vector->data) + vector_hash_peer(peer, compare_size, vector->size);
  match = binary_search(peer, vector->data, vector->size, peer_size, compare_size, exactmatch);

  if (*exactmatch)
    return match;

  /* This is the amount of bytes that needs to be pushed backwards by peer_size bytes to make room for new peer */
  end          = (ot_peer *)vector->data + vector->size * peer_size;
  match_to_end = end - match;

  if (vector->size + 1 > vector->space) {
    /* Remember the offset before realloc so the match pointer can be rebased */
    ptrdiff_t offset    = match - (ot_peer *)vector->data;
    size_t    new_space = vector->space ? OT_VECTOR_GROW_RATIO * vector->space : OT_VECTOR_MIN_MEMBERS;
    ot_peer  *new_data  = realloc(vector->data, new_space * peer_size);

    if (!new_data)
      return NULL;
    /* Adjust pointer if it moved by realloc */
    match = new_data + offset;

    vector->data  = new_data;
    vector->space = new_space;
  }

  /* Here we're guaranteed to have enough space in vector to move the block of peers after insertion point */
  memmove(match + peer_size, match, match_to_end);

  vector->size++;
  return match;
}
@ -113,126 +129,134 @@ ot_peer *vector_find_or_insert_peer( ot_vector *vector, ot_peer *peer, int *exac
1 if a non-seeding peer was removed
2 if a seeding peer was removed
*/
/* Removes the given peer (matched on its address prefix) from the vector,
   descending into the right bucket when the vector is a bucket list.
   Returns 0 if the peer was not found, 1 if a non-seeding peer was removed,
   2 if a seeding peer was removed. Shrinks the vector afterwards. */
int vector_remove_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size) {
  int      exactmatch, was_seeder;
  ot_peer *match, *end;
  size_t   compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);

  if (!vector->size)
    return 0;

  /* If space is zero but size is set, we're dealing with a list of vector->size buckets */
  if (vector->space < vector->size)
    vector = ((ot_vector *)vector->data) + vector_hash_peer(peer, compare_size, vector->size);

  end   = ((ot_peer *)vector->data) + peer_size * vector->size;
  match = (ot_peer *)binary_search(peer, vector->data, vector->size, peer_size, compare_size, &exactmatch);
  if (!exactmatch)
    return 0;

  /* Record the seeder flag before the record is overwritten by the memmove */
  was_seeder = (OT_PEERFLAG_D(match, peer_size) & PEER_FLAG_SEEDING) ? 2 : 1;
  memmove(match, match + peer_size, end - match - peer_size);

  vector->size--;
  vector_fixup_peers(vector, peer_size);
  return was_seeder;
}
/* Removes one torrent entry from the sorted torrent vector, freeing both of
   its peer lists, and shrinks the vector when it falls below the threshold. */
void vector_remove_torrent(ot_vector *vector, ot_torrent *match) {
  ot_torrent *end = ((ot_torrent *)vector->data) + vector->size;

  if (!vector->size)
    return;

  /* If this is being called after a unsuccessful malloc() for peer_list
     in add_peer_to_torrent, match->peer_list actually might be NULL,
     so guard both lists before freeing them */
  if (match->peer_list6)
    free_peerlist(match->peer_list6);
  if (match->peer_list4)
    free_peerlist(match->peer_list4);

  memmove(match, match + 1, sizeof(ot_torrent) * (end - match - 1));
  if ((--vector->size * OT_VECTOR_SHRINK_THRESH < vector->space) && (vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS)) {
    /* Shrink via a temporary so a failing realloc cannot lose the data */
    void *new_data = realloc(vector->data, (vector->space / OT_VECTOR_SHRINK_RATIO) * sizeof(ot_torrent));
    if (new_data) {
      vector->data = new_data;
      vector->space /= OT_VECTOR_SHRINK_RATIO;
    }
  }
}
/* Frees the payload of each of num_buckets vectors, then the vector array
   itself. Used to dispose of a bucket list in one call. */
void vector_clean_list(ot_vector *vector, int num_buckets) {
  while (num_buckets--)
    free(vector[num_buckets].data);
  free(vector);
  return;
}
/* Re-buckets a peer list when its population crossed a threshold: picks a new
   bucket count from peer_count, moves every peer into its hash bucket, sorts
   each bucket for later bsearch, and swaps the new layout into peer_list.
   On any allocation failure the old layout is kept untouched. */
void vector_redistribute_buckets(ot_peerlist *peer_list, size_t peer_size) {
  int        tmp, bucket, bucket_size_new, num_buckets_new, num_buckets_old = 1;
  ot_vector *bucket_list_new, *bucket_list_old = &peer_list->peers;
  /* Pick the comparison function matching the peer record flavour */
  int (*sort_func)(const void *, const void *) = peer_size == OT_PEER_SIZE6 ? &vector_compare_peer6 : &vector_compare_peer4;

  if (OT_PEERLIST_HASBUCKETS(peer_list)) {
    num_buckets_old = peer_list->peers.size;
    bucket_list_old = peer_list->peers.data;
  }

  /* Choose the target bucket count from the current peer population */
  if (peer_list->peer_count < 255)
    num_buckets_new = 1;
  else if (peer_list->peer_count > 8192)
    num_buckets_new = 64;
  else if (peer_list->peer_count >= 512 && peer_list->peer_count < 4096)
    num_buckets_new = 16;
  else if (peer_list->peer_count < 512 && num_buckets_old <= 16)
    num_buckets_new = num_buckets_old;
  else if (peer_list->peer_count < 512)
    num_buckets_new = 1;
  else if (peer_list->peer_count < 8192 && num_buckets_old > 1)
    num_buckets_new = num_buckets_old;
  else
    num_buckets_new = 16;

  if (num_buckets_new == num_buckets_old)
    return;

  /* Assume near perfect distribution */
  bucket_list_new = malloc(num_buckets_new * sizeof(ot_vector));
  if (!bucket_list_new)
    return;
  bzero(bucket_list_new, num_buckets_new * sizeof(ot_vector));

  tmp             = peer_list->peer_count / num_buckets_new;
  bucket_size_new = OT_VECTOR_MIN_MEMBERS;
  while (bucket_size_new < tmp)
    bucket_size_new *= OT_VECTOR_GROW_RATIO;

  /* preallocate vectors to hold all peers */
  for (bucket = 0; bucket < num_buckets_new; ++bucket) {
    bucket_list_new[bucket].space = bucket_size_new;
    bucket_list_new[bucket].data  = malloc(bucket_size_new * peer_size);
    if (!bucket_list_new[bucket].data)
      return vector_clean_list(bucket_list_new, num_buckets_new);
  }

  /* Now sort them into the correct bucket */
  for (bucket = 0; bucket < num_buckets_old; ++bucket) {
    ot_peer *peers_old      = bucket_list_old[bucket].data;
    int      peer_count_old = bucket_list_old[bucket].size;
    while (peer_count_old--) {
      ot_vector *bucket_dest = bucket_list_new;
      if (num_buckets_new > 1)
        bucket_dest += vector_hash_peer(peers_old, OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size), num_buckets_new);
      if (bucket_dest->size + 1 > bucket_dest->space) {
        void *tmp = realloc(bucket_dest->data, peer_size * OT_VECTOR_GROW_RATIO * bucket_dest->space);
        if (!tmp)
          return vector_clean_list(bucket_list_new, num_buckets_new);
        bucket_dest->data = tmp;
        bucket_dest->space *= OT_VECTOR_GROW_RATIO;
      }
      memcpy((ot_peer *)bucket_dest->data + peer_size * bucket_dest->size++, peers_old, peer_size);
      peers_old += peer_size;
    }
  }

  /* Now sort each bucket to later allow bsearch */
  for (bucket = 0; bucket < num_buckets_new; ++bucket)
    qsort(bucket_list_new[bucket].data, bucket_list_new[bucket].size, peer_size, sort_func);

  /* Everything worked fine. Now link new bucket_list to peer_list */
  if (OT_PEERLIST_HASBUCKETS(peer_list))
    vector_clean_list((ot_vector *)peer_list->peers.data, peer_list->peers.size);
  else
    free(peer_list->peers.data);

  if (num_buckets_new > 1) {
    peer_list->peers.data  = bucket_list_new;
    peer_list->peers.size  = num_buckets_new;
    peer_list->peers.space = 0; /* Magic marker for "is list of buckets" */
  } else {
    /* Single bucket: inline the sole vector and drop the wrapper array */
    peer_list->peers.data  = bucket_list_new->data;
    peer_list->peers.size  = bucket_list_new->size;
    peer_list->peers.space = bucket_list_new->space;
    free(bucket_list_new);
  }
}
/* Shrinks a peer vector after removals: frees the storage entirely when the
   vector became empty, otherwise halves the capacity (by the shrink ratio)
   while it stays above the usage threshold, then reallocs once. */
void vector_fixup_peers(ot_vector *vector, size_t peer_size) {
  int need_fix = 0;

  if (!vector->size) {
    free(vector->data);
    vector->data  = NULL;
    vector->space = 0;
    return;
  }

  while ((vector->size * OT_VECTOR_SHRINK_THRESH < vector->space) && (vector->space >= OT_VECTOR_SHRINK_RATIO * OT_VECTOR_MIN_MEMBERS)) {
    vector->space /= OT_VECTOR_SHRINK_RATIO;
    need_fix++;
  }
  if (need_fix) {
    /* Use a temporary: if the shrinking realloc fails, keep the old block
       instead of clobbering vector->data with NULL and leaking the peers */
    void *new_data = realloc(vector->data, vector->space * peer_size);
    if (new_data)
      vector->data = new_data;
  }
}
const char *g_version_vector_c = "$Source$: $Revision$\n";

View File

@ -16,19 +16,21 @@
#define OT_PEER_BUCKET_MAXCOUNT 256
/* A growable sorted array. When space < size the vector is a "bucket list":
   data points at `size` ot_vector buckets instead of raw members. */
typedef struct {
  void  *data;
  size_t size;
  size_t space;
} ot_vector;

void    *binary_search(const void *const key, const void *base, const size_t member_count, const size_t member_size, size_t compare_size, int *exactmatch);
void    *vector_find_or_insert(ot_vector *vector, void *key, size_t member_size, size_t compare_size, int *exactmatch);
ot_peer *vector_find_or_insert_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size, int *exactmatch);

int      vector_remove_peer(ot_vector *vector, ot_peer const *peer, size_t peer_size);
void     vector_remove_torrent(ot_vector *vector, ot_torrent *match);

/* For ot_clean.c */
void     vector_redistribute_buckets(ot_peerlist *peer_list, size_t peer_size);
void     vector_fixup_peers(ot_vector *vector, size_t peer_size);

void     vector_clean_list(ot_vector *vector, int num_buckets);

#endif

840
proxy.c

File diff suppressed because it is too large Load Diff

View File

@ -45,37 +45,45 @@ static const unsigned char is_unreserved[256] = {
/* Do a fast hex digit to nibble conversion: '0'-'9' -> 0-9,
   'a'-'f'/'A'-'F' -> 10-15, anything else -> 0xff (error marker). */
static unsigned char fromhex(unsigned char x) {
  x -= '0';
  if (x <= 9)
    return x;
  /* Fold lowercase onto uppercase, then map 'A'..'F' to 0..5 */
  x &= ~0x20;
  x -= 'A' - '0';
  if (x < 6)
    return x + 10;
  return 0xff;
}
/* Skip the value of a param=value pair: advances *string past every character
   classified as part of a 'value'. A hard terminator (e.g. \0, \n) is left in
   place so the next scan_urlencoded_query call sees it again. */
void scan_urlencoded_skipvalue(char **string) {
  const unsigned char *s = *(const unsigned char **)string;
  unsigned char        f;

  /* Since we are asked to skip the 'value', we assume to stop at
     terminators for a 'value' string position */
  while ((f = is_unreserved[*s++]) & SCAN_SEARCHPATH_VALUE)
    ;

  /* If we stopped at a hard terminator like \0 or \n, make the
     next scan_urlencoded_query encounter it again */
  if (f & SCAN_SEARCHPATH_TERMINATOR)
    --s;

  *string = (char *)s;
}
/* Scans the next token from *string and looks it up in the keywords table.
   Returns the keyword's value on a match, the negative scan error code when
   scanning failed, or -3 for an empty token / no keyword matched. */
int scan_find_keywords(const ot_keywords *keywords, char **string, SCAN_SEARCHPATH_FLAG flags) {
  char   *deststring   = *string;
  ssize_t match_length = scan_urlencoded_query(string, deststring, flags);

  if (match_length < 0)
    return match_length;
  if (match_length == 0)
    return -3;

  while (keywords->key) {
    /* Keys must match the token exactly: same prefix and same length */
    if (!strncmp(keywords->key, deststring, match_length) && !keywords->key[match_length])
      return keywords->value;
    keywords++;
  }

  return -3;
}
/* Decodes one url-encoded token from *string into deststring (which may alias
   the input). flags selects the terminator set (path / param / value).
   Returns the decoded length, -1 on a syntax error or unexpected terminator,
   -2 when scanning started on a hard terminator. Advances *string. */
ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_FLAG flags) {
  const unsigned char *s = *(const unsigned char **)string;
  unsigned char       *d = (unsigned char *)deststring;
  unsigned char        b, c;

  /* This is the main decoding loop.
    'flag' determines, which characters are non-terminating in current context
    (ie. stop at '=' and '&' if scanning for a 'param'; stop at '?' if scanning for the path )
  */
  while (is_unreserved[c = *s++] & flags) {

    /* When encountering an url escaped character, try to decode */
    if (c == '%') {
      if ((b = fromhex(*s++)) == 0xff)
        return -1;
      if ((c = fromhex(*s++)) == 0xff)
        return -1;
      c |= (b << 4);
    }

    /* Write (possibly decoded) character to output */
    *d++ = c;
  }

  switch (c) {
  case 0:
  case '\r':
  case '\n':
  case ' ':
    /* If we started scanning on a hard terminator, indicate we've finished */
    if (d == (unsigned char *)deststring)
      return -2;

    /* Else make the next call to scan_urlencoded_param encounter it again */
    --s;
    break;
  case '?':
    if (flags != SCAN_PATH)
      return -1;
    break;
  case '=':
    if (flags != SCAN_SEARCHPATH_PARAM)
      return -1;
    break;
  case '&':
    if (flags == SCAN_PATH)
      return -1;
    if (flags == SCAN_SEARCHPATH_PARAM)
      --s;
    break;
  default:
    return -1;
  }

  *string = (char *)s;
  return d - (unsigned char *)deststring;
}
/* Parses an optionally negative decimal integer from at most len chars of
   data into *tmp. Returns the number of unparsed bytes; a non-zero return
   usually means the input was not fully numeric. *tmp is 0 if no digit
   was consumed. NOTE(review): no overflow protection — inherited behavior. */
ssize_t scan_fixed_int(char *data, size_t len, int *tmp) {
  int minus = 0;
  *tmp      = 0;
  if (*data == '-')
    --len, ++data, ++minus;
  while ((len > 0) && (*data >= '0') && (*data <= '9')) {
    --len;
    *tmp = 10 * *tmp + *data++ - '0';
  }
  if (minus)
    *tmp = -*tmp;
  return len;
}
const char *g_version_scan_urlencoded_query_c = "$Source$: $Revision$\n";

View File

@ -38,18 +38,18 @@ ssize_t scan_urlencoded_query(char **string, char *deststring, SCAN_SEARCHPATH_F
or -2 for terminator found
or -3 for no keyword matched
*/
int scan_find_keywords(const ot_keywords *keywords, char **string, SCAN_SEARCHPATH_FLAG flags);

/* string  in:  pointer to value of a param=value pair to skip
           out: pointer to next scan position on return
*/
void scan_urlencoded_skipvalue(char **string);

/* data    pointer to len chars of string
   len     length of chars in data to parse
   number  number to receive result
   returns number of bytes not parsed, mostly !=0 means fail
*/
ssize_t scan_fixed_int(char *data, size_t len, int *number);

#endif

View File

@ -2,13 +2,21 @@
# Endlessly hammer a tracker with randomized announce/scrape requests:
# one IPv4 announce, one IPv6 announce, one scrape per iteration.
while true; do
request_string="GET /announce?info_hash=012345678901234567\
%$(printf %02X $(( $RANDOM & 0xff )) )\
$(printf %02X $(( $RANDOM & 0xff )) )\
&ip=$(( $RANDOM & 0xff )).17.13.15&port=$(( $RANDOM & 0xff )) HTTP/1.0"

# echo $request_string
# echo

printf "%s\n\n" "$request_string" | nc 84.200.61.9 6969 | hexdump -C

request_string="GET /announce?info_hash=012345678901234567\
$(printf %02X $(( $RANDOM & 0xff )) )\
&ip=2001:1608:6:27::$(( $RANDOM & 0xff ))&port=$(( $RANDOM & 0xff )) HTTP/1.0"

printf "%s\n\n" "$request_string" | nc 2001:1608:6:27::9 6969 | hexdump -C
printf "%s\n\n" "$request_string"

request_string="GET /scrape?info_hash=012345678901234567\
$(printf %02X $(( $RANDOM & 0xff )) ) HTTP/1.0"

printf "%s\n\n" "$request_string" | nc 2001:1608:6:27::9 6969 | hexdump -C
done

View File

@ -4,454 +4,595 @@
$id$ */
/* System */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <unistd.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
/* Libowfat */
#include "array.h"
#include "byte.h"
#include "io.h"
#include "iob.h"
#include "array.h"
#include "ip6.h"
/* Opentracker */
#include "trackerlogic.h"
#include "ot_accesslist.h"
#include "ot_clean.h"
#include "ot_fullscrape.h"
#include "ot_http.h"
#include "ot_livesync.h"
#include "ot_mutex.h"
#include "ot_stats.h"
#include "ot_clean.h"
#include "ot_http.h"
#include "ot_accesslist.h"
#include "ot_fullscrape.h"
#include "ot_livesync.h"
#include "ot_vector.h"
#include "trackerlogic.h"
/* Forward declaration */
size_t return_peers_for_torrent( struct ot_workstruct * ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto );
size_t return_peers_for_torrent(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto);
/* Releases one peer list: its bucket array (via vector_clean_list) or its
   flat peer storage, then the list structure itself. Safe on NULL. */
void free_peerlist(ot_peerlist *peer_list) {
  /* Robustness: callers may hold a NULL list after a failed malloc */
  if (!peer_list)
    return;
  if (peer_list->peers.data) {
    if (OT_PEERLIST_HASBUCKETS(peer_list))
      vector_clean_list((ot_vector *)peer_list->peers.data, peer_list->peers.size);
    else
      free(peer_list->peers.data);
  }
  free(peer_list);
}
/* Re-creates a torrent entry from persisted state (base time and download
   count) without any peers. No-op when the hash is blacklisted or the
   torrent already exists. Takes and releases the hash's bucket lock. */
void add_torrent_from_saved_state(ot_hash const hash, ot_time base, size_t down_count) {
  int         exactmatch;
  ot_torrent *torrent;
  ot_vector  *torrents_list = mutex_bucket_lock_by_hash(hash);

  if (!accesslist_hashisvalid(hash))
    return mutex_bucket_unlock_by_hash(hash, 0);

  torrent = vector_find_or_insert(torrents_list, (void *)hash, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
  if (!torrent || exactmatch)
    return mutex_bucket_unlock_by_hash(hash, 0);

  /* Create a new torrent entry, then */
  byte_zero(torrent, sizeof(ot_torrent));
  memcpy(torrent->hash, hash, sizeof(ot_hash));

  if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) {
    vector_remove_torrent(torrents_list, torrent);
    return mutex_bucket_unlock_by_hash(hash, 0);
  }

  byte_zero(torrent->peer_list6, sizeof(ot_peerlist));
  byte_zero(torrent->peer_list4, sizeof(ot_peerlist));
  torrent->peer_list6->base       = base;
  torrent->peer_list4->base       = base;
  torrent->peer_list6->down_count = down_count;
  torrent->peer_list4->down_count = down_count;

  return mutex_bucket_unlock_by_hash(hash, 1);
}
/* Registers ws->peer for the torrent ws->hash (creating the torrent if
   needed), updates seed/download counters and sync state, then writes a peer
   selection into ws->reply. Returns the reply size in bytes, or 0 on failure,
   blacklisted hash (UDP), or multicast-sync input. Holds the bucket lock for
   the whole operation. */
size_t add_peer_to_torrent_and_return_peers(PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount) {
  int            exactmatch, delta_torrentcount = 0;
  ot_torrent    *torrent;
  ot_peer       *peer_dest;
  ot_vector     *torrents_list = mutex_bucket_lock_by_hash(*ws->hash);
  ot_peerlist   *peer_list;
  size_t         peer_size; /* initialized in next line */
  ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size);

  if (!accesslist_hashisvalid(*ws->hash)) {
    mutex_bucket_unlock_by_hash(*ws->hash, 0);
    if (proto == FLAG_TCP) {
      const char invalid_hash[] = "d14:failure reason63:Requested download is not authorized for use with this tracker.e";
      memcpy(ws->reply, invalid_hash, strlen(invalid_hash));
      return strlen(invalid_hash);
    }
    return 0;
  }

  torrent = vector_find_or_insert(torrents_list, (void *)ws->hash, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
  if (!torrent) {
    mutex_bucket_unlock_by_hash(*ws->hash, 0);
    return 0;
  }

  if (!exactmatch) {
    /* Create a new torrent entry, then */
    byte_zero(torrent, sizeof(ot_torrent));
    memcpy(torrent->hash, *ws->hash, sizeof(ot_hash));

    if (!(torrent->peer_list6 = malloc(sizeof(ot_peerlist))) || !(torrent->peer_list4 = malloc(sizeof(ot_peerlist)))) {
      vector_remove_torrent(torrents_list, torrent);
      mutex_bucket_unlock_by_hash(*ws->hash, 0);
      return 0;
    }

    byte_zero(torrent->peer_list6, sizeof(ot_peerlist));
    byte_zero(torrent->peer_list4, sizeof(ot_peerlist));
    delta_torrentcount = 1;
  } else
    clean_single_torrent(torrent);

  torrent->peer_list6->base = g_now_minutes;
  torrent->peer_list4->base = g_now_minutes;

  /* Route the peer into the list matching its address family */
  peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;

  /* Check for peer in torrent */
  peer_dest = vector_find_or_insert_peer(&(peer_list->peers), peer_src, peer_size, &exactmatch);
  if (!peer_dest) {
    mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount);
    return 0;
  }

  /* Tell peer that it's fresh */
  OT_PEERTIME(ws->peer, OT_PEER_SIZE6) = 0;

  /* Sanitize flags: Whoever claims to have completed download, must be a seeder */
  if ((OT_PEERFLAG(ws->peer) & (PEER_FLAG_COMPLETED | PEER_FLAG_SEEDING)) == PEER_FLAG_COMPLETED)
    OT_PEERFLAG(ws->peer) ^= PEER_FLAG_COMPLETED;

  /* If we hadn't had a match create peer there */
  if (!exactmatch) {

#ifdef WANT_SYNC_LIVE
    if (proto == FLAG_MCA)
      OT_PEERFLAG(ws->peer) |= PEER_FLAG_FROM_SYNC;
    else
      livesync_tell(ws);
#endif

    peer_list->peer_count++;
    if (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED) {
      peer_list->down_count++;
      stats_issue_event(EVENT_COMPLETED, 0, (uintptr_t)ws);
    }
    if (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING)
      peer_list->seed_count++;

  } else {
    stats_issue_event(EVENT_RENEW, 0, OT_PEERTIME(peer_dest, peer_size));
#ifdef WANT_SPOT_WOODPECKER
    if ((OT_PEERTIME(peer_dest, peer_size) > 0) && (OT_PEERTIME(peer_dest, peer_size) < 20))
      stats_issue_event(EVENT_WOODPECKER, 0, (uintptr_t)&ws->peer);
#endif
#ifdef WANT_SYNC_LIVE
    /* Won't live sync peers that come back too fast. Only exception:
       fresh "completed" reports */
    if (proto != FLAG_MCA) {
      if (OT_PEERTIME(peer_dest, peer_size) > OT_CLIENT_SYNC_RENEW_BOUNDARY ||
          (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED)))
        livesync_tell(ws);
    }
#endif

    /* Reconcile counters with the state transition of the returning peer */
    if ((OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING) && !(OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING))
      peer_list->seed_count--;
    if (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_SEEDING) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_SEEDING))
      peer_list->seed_count++;
    if (!(OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED) && (OT_PEERFLAG(ws->peer) & PEER_FLAG_COMPLETED)) {
      peer_list->down_count++;
      stats_issue_event(EVENT_COMPLETED, 0, (uintptr_t)ws);
    }
    if (OT_PEERFLAG_D(peer_dest, peer_size) & PEER_FLAG_COMPLETED)
      OT_PEERFLAG(ws->peer) |= PEER_FLAG_COMPLETED;
  }

  memcpy(peer_dest, peer_src, peer_size);
#ifdef WANT_SYNC
  if (proto == FLAG_MCA) {
    mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount);
    return 0;
  }
#endif

  ws->reply_size = return_peers_for_torrent(ws, torrent, amount, ws->reply, proto);
  mutex_bucket_unlock_by_hash(*ws->hash, delta_torrentcount);
  return ws->reply_size;
}
/* Copies every peer of the list into reply, compare_size bytes each:
   leechers are written from the front, seeders from the back, so the buffer
   ends up partitioned. Returns the total number of bytes written. */
static size_t return_peers_all(ot_peerlist *peer_list, size_t peer_size, char *reply) {
  unsigned int bucket, num_buckets = 1;
  ot_vector   *bucket_list  = &peer_list->peers;
  size_t       compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
  size_t       result       = compare_size * peer_list->peer_count;
  char        *r_end        = reply + result;

  if (OT_PEERLIST_HASBUCKETS(peer_list)) {
    num_buckets = bucket_list->size;
    bucket_list = (ot_vector *)bucket_list->data;
  }

  for (bucket = 0; bucket < num_buckets; ++bucket) {
    ot_peer *peers      = bucket_list[bucket].data;
    size_t   peer_count = bucket_list[bucket].size;
    while (peer_count--) {
      if (OT_PEERFLAG_D(peers, peer_size) & PEER_FLAG_SEEDING) {
        /* Seeders fill the reply from the back */
        r_end -= compare_size;
        memcpy(r_end, peers, compare_size);
      } else {
        memcpy(reply, peers, compare_size);
        reply += compare_size;
      }
      peers += peer_size;
    }
  }
  return result;
}
/* Picks `amount` pseudo-random peers from the list using fixpoint stepping
   over all buckets, writing compare_size bytes per peer into reply (leechers
   from the front, seeders from the back). Returns the bytes written. */
static size_t return_peers_selection(struct ot_workstruct *ws, ot_peerlist *peer_list, size_t peer_size, size_t amount, char *reply) {
  unsigned int bucket_offset, bucket_index = 0, num_buckets = 1;
  ot_vector   *bucket_list  = &peer_list->peers;
  unsigned int shifted_pc   = peer_list->peer_count;
  unsigned int shifted_step = 0;
  unsigned int shift        = 0;
  size_t       compare_size = OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(peer_size);
  size_t       result       = compare_size * amount;
  char        *r_end        = reply + result;

  if (OT_PEERLIST_HASBUCKETS(peer_list)) {
    num_buckets = bucket_list->size;
    bucket_list = (ot_vector *)bucket_list->data;
  }

  /* Make fixpoint arithmetic as exact as possible */
#define MAXPRECBIT (1 << (8 * sizeof(int) - 3))
  while (!(shifted_pc & MAXPRECBIT)) {
    shifted_pc <<= 1;
    shift++;
  }
  shifted_step = shifted_pc / amount;
#undef MAXPRECBIT

  /* Initialize somewhere in the middle of peers so that
     fixpoint's aliasing doesn't alway miss the same peers */
  bucket_offset = nrand48(ws->rand48_state) % peer_list->peer_count;

  while (amount--) {
    ot_peer *peer;

    /* This is the aliased, non shifted range, next value may fall into */
    unsigned int diff = (((amount + 1) * shifted_step) >> shift) - ((amount * shifted_step) >> shift);
    bucket_offset += 1 + nrand48(ws->rand48_state) % diff;

    /* Wrap the running offset into the bucket it falls in */
    while (bucket_offset >= bucket_list[bucket_index].size) {
      bucket_offset -= bucket_list[bucket_index].size;
      bucket_index = (bucket_index + 1) % num_buckets;
    }
    peer = bucket_list[bucket_index].data + peer_size * bucket_offset;

    if (OT_PEERFLAG_D(peer, peer_size) & PEER_FLAG_SEEDING) {
      /* Seeders fill the reply from the back */
      r_end -= compare_size;
      memcpy(r_end, peer, compare_size);
    } else {
      memcpy(reply, peer, compare_size);
      reply += compare_size;
    }
  }
  return result;
}
/* Compiles a list of random peers for a torrent announced over UDP.
 * Reply must have enough space to hold 12 + 6 * amount (v4) or
 * 12 + 18 * amount (v6) bytes.
 * The 12 byte header carries interval, leecher count and seeder count
 * (totals over both address families) in network byte order.
 * Does not yet check not to return self. */
static size_t return_peers_for_torrent_udp(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply) {
  char        *r = reply;
  /* The announcing peer's address family decides which list we serve from */
  size_t       peer_size = peer_size_from_peer6(&ws->peer);
  ot_peerlist *peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
  size_t       peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
  size_t       seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;

  if (amount > peer_list->peer_count)
    amount = peer_list->peer_count;

  *(uint32_t *)(r + 0) = htonl(OT_CLIENT_REQUEST_INTERVAL_RANDOM);
  *(uint32_t *)(r + 4) = htonl(peer_count - seed_count);
  *(uint32_t *)(r + 8) = htonl(seed_count);
  r += 12;

  if (amount) {
    /* Asking for the whole list avoids the random-selection pass */
    if (amount == peer_list->peer_count)
      r += return_peers_all(peer_list, peer_size, r);
    else
      r += return_peers_selection(ws, peer_list, peer_size, amount, r);
  }
  return r - reply;
}
/* Compiles a bencoded announce reply for a torrent announced over TCP.
 * Reply must have enough space to hold 92 + 6 * amount (v4) plus
 * 18 * amount (v6) bytes.
 * Splits `amount` between the v4 and v6 peer lists: each family gets at
 * least a quarter (if it has that many peers), the remainder is divided
 * proportionally to how many spare peers each family still has.
 * Returns the number of bytes written. */
static size_t return_peers_for_torrent_tcp(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply) {
  char  *r = reply;
  int    erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM;
  size_t seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
  size_t down_count = torrent->peer_list6->down_count + torrent->peer_list4->down_count;
  size_t peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - seed_count;

  /* Simple case: amount of peers in both lists is less than requested, here we return all results */
  size_t amount_v4 = torrent->peer_list4->peer_count;
  size_t amount_v6 = torrent->peer_list6->peer_count;

  /* Complex case: both lists have more than enough entries and we need to split between v4 and v6 clients */
  if (amount_v4 + amount_v6 > amount) {
    size_t       amount_left, percent_v6 = 0, percent_v4 = 0, left_v6, left_v4;
    const size_t SCALE = 1024; /* fixed-point scale for the proportional split */

    /* If possible, fill at least a quarter of peer from each family */
    if (amount / 4 <= amount_v4)
      amount_v4 = amount / 4;
    if (amount / 4 <= amount_v6)
      amount_v6 = amount / 4;

    /* Fill the rest according to which family's pool provides more peers */
    amount_left = amount - (amount_v4 + amount_v6);

    left_v4 = torrent->peer_list4->peer_count - amount_v4;
    left_v6 = torrent->peer_list6->peer_count - amount_v6;

    if (left_v4 + left_v6) {
      percent_v4 = (SCALE * left_v4) / (left_v4 + left_v6);
      percent_v6 = (SCALE * left_v6) / (left_v4 + left_v6);
    }

    amount_v4 += (amount_left * percent_v4) / SCALE;
    amount_v6 += (amount_left * percent_v6) / SCALE;

    /* Integer division rounding can leave out a peer */
    if (amount_v4 + amount_v6 < amount && amount_v6 < torrent->peer_list6->peer_count)
      ++amount_v6;
    if (amount_v4 + amount_v6 < amount && amount_v4 < torrent->peer_list4->peer_count)
      ++amount_v4;
  }

  r +=
      sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zde8:intervali%ie12:min intervali%ie", seed_count, down_count, peer_count, erval, erval / 2);

  if (amount_v4) {
    r += sprintf(r, PEERS_BENCODED4 "%zd:", OT_PEER_COMPARE_SIZE4 * amount_v4);
    if (amount_v4 == torrent->peer_list4->peer_count)
      r += return_peers_all(torrent->peer_list4, OT_PEER_SIZE4, r);
    else
      r += return_peers_selection(ws, torrent->peer_list4, OT_PEER_SIZE4, amount_v4, r);
  }

  if (amount_v6) {
    r += sprintf(r, PEERS_BENCODED6 "%zd:", OT_PEER_COMPARE_SIZE6 * amount_v6);
    if (amount_v6 == torrent->peer_list6->peer_count)
      r += return_peers_all(torrent->peer_list6, OT_PEER_SIZE6, r);
    else
      r += return_peers_selection(ws, torrent->peer_list6, OT_PEER_SIZE6, amount_v6, r);
  }

  /* Close the outer bencoded dictionary */
  *r++ = 'e';

  return r - reply;
}
/* Compiles a list of random peers for a torrent
* Reply must have enough space to hold:
* 92 + 6 * amount bytes for TCP/IPv4
* 92 + 18 * amount bytes for TCP/IPv6
* 12 + 6 * amount bytes for UDP/IPv4
* 12 + 18 * amount bytes for UDP/IPv6
* Does not yet check not to return self
*/
/* Dispatches the announce reply to the TCP (bencoded) or UDP (binary)
   builder, depending on the protocol the request arrived over. */
size_t return_peers_for_torrent(struct ot_workstruct *ws, ot_torrent *torrent, size_t amount, char *reply, PROTO_FLAG proto) {
  if (proto == FLAG_TCP)
    return return_peers_for_torrent_tcp(ws, torrent, amount, reply);
  return return_peers_for_torrent_udp(ws, torrent, amount, reply);
}
/* Fetches scrape info for a specific torrent */
/* Fetches scrape info for a specific torrent.
 * Always writes exactly 12 bytes to reply: seeders, completed downloads
 * and leechers as three network-byte-order uint32s; all zero when the
 * torrent is unknown or was just expired by clean_single_torrent().
 * Returns 12. */
size_t return_udp_scrape_for_torrent(ot_hash const hash, char *reply) {
  int         exactmatch, delta_torrentcount = 0;
  ot_vector  *torrents_list = mutex_bucket_lock_by_hash(hash);
  ot_torrent *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);

  if (!exactmatch) {
    memset(reply, 0, 12);
  } else {
    uint32_t *r = (uint32_t *)reply;

    if (clean_single_torrent(torrent)) {
      /* Torrent timed out during cleanup; drop it and report zeros */
      vector_remove_torrent(torrents_list, torrent);
      memset(reply, 0, 12);
      delta_torrentcount = -1;
    } else {
      /* Stats are the sum over the v6 and v4 peer lists */
      r[0] = htonl(torrent->peer_list6->seed_count + torrent->peer_list4->seed_count);
      r[1] = htonl(torrent->peer_list6->down_count + torrent->peer_list4->down_count);
      r[2] = htonl(torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - torrent->peer_list6->seed_count - torrent->peer_list4->seed_count);
    }
  }
  mutex_bucket_unlock_by_hash(hash, delta_torrentcount);
  return 12;
}
/* Fetches scrape info for a specific torrent */
/* Fetches scrape info for a list of torrents.
 * Builds the bencoded "files" dictionary for a TCP scrape request; unknown
 * or just-expired hashes are silently omitted from the reply.
 * Returns the number of bytes written to reply. */
size_t return_tcp_scrape_for_torrent(ot_hash const *hash_list, int amount, char *reply) {
  char *r = reply;
  int   exactmatch, i;

  r += sprintf(r, "d5:filesd");

  for (i = 0; i < amount; ++i) {
    int            delta_torrentcount = 0;
    ot_hash const *hash = hash_list + i;
    ot_vector     *torrents_list = mutex_bucket_lock_by_hash(*hash);
    ot_torrent    *torrent = binary_search(hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);

    if (exactmatch) {
      if (clean_single_torrent(torrent)) {
        /* Torrent timed out during cleanup; drop it, emit nothing */
        vector_remove_torrent(torrents_list, torrent);
        delta_torrentcount = -1;
      } else {
        /* "20:<hash>d...e" — key is the raw 20 byte info hash */
        *r++ = '2';
        *r++ = '0';
        *r++ = ':';
        memcpy(r, hash, sizeof(ot_hash));
        r += sizeof(ot_hash);
        r += sprintf(r, "d8:completei%zde10:downloadedi%zde10:incompletei%zdee", torrent->peer_list6->seed_count + torrent->peer_list4->seed_count,
                     torrent->peer_list6->down_count + torrent->peer_list4->down_count,
                     torrent->peer_list6->peer_count + torrent->peer_list4->peer_count - torrent->peer_list6->seed_count - torrent->peer_list4->seed_count);
      }
    }
    mutex_bucket_unlock_by_hash(*hash, delta_torrentcount);
  }

  /* Close "files" dict and the outer dict */
  *r++ = 'e';
  *r++ = 'e';
  return r - reply;
}
static ot_peerlist dummy_list;
/* Handles a "stopped" announce: removes ws->peer from the torrent's
 * matching (v4 or v6) peer list and writes a minimal reply with the
 * remaining totals. For FLAG_MCA (sync multicast) no reply is built.
 * Returns ws->reply_size. */
size_t remove_peer_from_torrent(PROTO_FLAG proto, struct ot_workstruct *ws) {
  int            exactmatch;
  ot_vector     *torrents_list = mutex_bucket_lock_by_hash(*ws->hash);
  ot_torrent    *torrent = binary_search(ws->hash, torrents_list->data, torrents_list->size, sizeof(ot_torrent), OT_HASH_COMPARE_SIZE, &exactmatch);
  ot_peerlist   *peer_list = &dummy_list;
  size_t         peer_size; /* initialized in next line */
  ot_peer const *peer_src = peer_from_peer6(&ws->peer, &peer_size);
  size_t         peer_count = 0, seed_count = 0;

#ifdef WANT_SYNC_LIVE
  if (proto != FLAG_MCA) {
    /* Propagate the stop event to sync peers */
    OT_PEERFLAG(ws->peer) |= PEER_FLAG_STOPPED;
    livesync_tell(ws);
  }
#endif

  if (exactmatch) {
    peer_list = peer_size == OT_PEER_SIZE6 ? torrent->peer_list6 : torrent->peer_list4;
    switch (vector_remove_peer(&peer_list->peers, peer_src, peer_size)) {
    case 2:
      peer_list->seed_count--; /* Intentional fallthrough */
    case 1:
      peer_list->peer_count--; /* Intentional fallthrough */
    default:
      break;
    }

    /* Reply totals cover both address families */
    peer_count = torrent->peer_list6->peer_count + torrent->peer_list4->peer_count;
    seed_count = torrent->peer_list6->seed_count + torrent->peer_list4->seed_count;
  }

  if (proto == FLAG_TCP) {
    int erval = OT_CLIENT_REQUEST_INTERVAL_RANDOM;
    ws->reply_size = sprintf(ws->reply, "d8:completei%zde10:incompletei%zde8:intervali%ie12:min intervali%ie%s0:e", seed_count, peer_count - seed_count, erval,
                             erval / 2, peer_size == OT_PEER_SIZE6 ? PEERS_BENCODED6 : PEERS_BENCODED4);
  }

  /* Handle UDP reply */
  if (proto == FLAG_UDP) {
    ((uint32_t *)ws->reply)[2] = htonl(OT_CLIENT_REQUEST_INTERVAL_RANDOM);
    ((uint32_t *)ws->reply)[3] = htonl(peer_count - seed_count);
    ((uint32_t *)ws->reply)[4] = htonl(seed_count);
    ws->reply_size = 20;
  }

  mutex_bucket_unlock_by_hash(*ws->hash, 0);
  return ws->reply_size;
}
/* Invokes for_each on every torrent, bucket by bucket, holding the bucket
 * lock while inside it. A non-zero return from for_each skips the rest of
 * the current bucket; iteration stops entirely once the tracker is
 * shutting down (g_opentracker_running cleared). */
void iterate_all_torrents(int (*for_each)(ot_torrent *torrent, uintptr_t data), uintptr_t data) {
  int    bucket;
  size_t j;

  for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
    ot_vector  *torrents_list = mutex_bucket_lock(bucket);
    ot_torrent *torrents = (ot_torrent *)(torrents_list->data);

    for (j = 0; j < torrents_list->size; ++j)
      if (for_each(torrents + j, data))
        break;

    mutex_bucket_unlock(bucket, 0);
    if (!g_opentracker_running)
      return;
  }
}
/* Takes an ot_peer6 (always stored as a v6-sized record) and returns the
 * pointer to the actual peer data: the record itself for a native v6
 * address, or the tail past the 12 mapping bytes for a v4-mapped address.
 * Sets *peer_size to OT_PEER_SIZE6 or OT_PEER_SIZE4 accordingly. */
ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size) {
  ot_ip6 *ip = (ot_ip6 *)peer;
  if (!ip6_isv4mapped(ip)) {
    *peer_size = OT_PEER_SIZE6;
    return (ot_peer *)peer;
  }
  *peer_size = OT_PEER_SIZE4;
  /* v4-mapped: skip the 12 byte ::ffff: prefix of the address */
  return (ot_peer *)(((uint8_t *)peer) + 12);
}
/* Returns the stored record size for a peer held in an ot_peer6 buffer:
 * OT_PEER_SIZE4 for v4-mapped addresses, OT_PEER_SIZE6 otherwise. */
size_t peer_size_from_peer6(ot_peer6 *peer) {
  ot_ip6 *ip = (ot_ip6 *)peer;
  if (!ip6_isv4mapped(ip))
    return OT_PEER_SIZE6;
  return OT_PEER_SIZE4;
}
#ifdef _DEBUG_RANDOMTORRENTS
/* Debug helper: seeds the tracker with `amount` torrents, each with one
   randomly generated info hash and one randomly generated announcing peer.
   Peer flags are masked down to the valid seeding/completed/stopped bits. */
void trackerlogic_add_random_torrents(size_t amount) {
  struct ot_workstruct w;

  memset(&w, 0, sizeof(w));
  w.inbuf = malloc(G_INBUF_SIZE);
  w.outbuf = malloc(G_OUTBUF_SIZE);
  w.reply = w.outbuf;
  w.hash = (ot_hash *)w.inbuf;

  for (; amount; --amount) {
    arc4random_buf(w.hash, sizeof(ot_hash));
    arc4random_buf(&w.peer, sizeof(w.peer));
    OT_PEERFLAG(w.peer) &= PEER_FLAG_SEEDING | PEER_FLAG_COMPLETED | PEER_FLAG_STOPPED;
    add_peer_to_torrent_and_return_peers(FLAG_TCP, &w, 1);
  }

  free(w.inbuf);
  free(w.outbuf);
}
#endif
/* Prints message (plus a newline) to stderr and terminates the process
   with exit status 111. Never returns. */
void exerr(char *message) {
  fputs(message, stderr);
  fputc('\n', stderr);
  exit(111);
}
/* One-time tracker start-up: picks a random tracker id, defaults the
 * stats path to "stats", and starts all background worker threads. */
void trackerlogic_init() {
  g_tracker_id = random();

  if (!g_stats_path)
    g_stats_path = "stats";
  g_stats_path_len = strlen(g_stats_path);

  /* Initialise background worker threads */
  mutex_init();
  clean_init();
  fullscrape_init();
  accesslist_init();
  livesync_init();
  stats_init();
}
/* Tracker shutdown: frees every torrent's v6 and v4 peer lists and the
 * bucket vectors, stops all background worker threads, then releases the
 * bucket mutexes. */
void trackerlogic_deinit(void) {
  int    bucket, delta_torrentcount = 0;
  size_t j;

  /* Free all torrents... */
  for (bucket = 0; bucket < OT_BUCKET_COUNT; ++bucket) {
    ot_vector *torrents_list = mutex_bucket_lock(bucket);
    if (torrents_list->size) {
      for (j = 0; j < torrents_list->size; ++j) {
        ot_torrent *torrent = ((ot_torrent *)(torrents_list->data)) + j;
        free_peerlist(torrent->peer_list6);
        free_peerlist(torrent->peer_list4);
        delta_torrentcount -= 1;
      }
      free(torrents_list->data);
    }
    mutex_bucket_unlock(bucket, delta_torrentcount);
  }

  /* Deinitialise background worker threads */
  stats_deinit();
  livesync_deinit();
  accesslist_deinit();
  fullscrape_deinit();
  clean_deinit();
  /* Release mutexes */
  mutex_deinit();
}
const char *g_version_trackerlogic_c = "$Source$: $Revision$\n";

View File

@ -6,11 +6,11 @@
#ifndef OT_TRACKERLOGIC_H__
#define OT_TRACKERLOGIC_H__
#include <sys/types.h>
#include <sys/time.h>
#include <time.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/time.h>
#include <sys/types.h>
#include <time.h>
#if defined(__linux__) && defined(WANT_ARC4RANDOM)
#include <bsd/stdlib.h>
@ -22,111 +22,133 @@
/* Core scalar types: 20 byte info hash, timestamps, and a 16 byte address
   that holds native v6 or v4-mapped v6 addresses. */
typedef uint8_t ot_hash[20];
typedef time_t  ot_time;
typedef char    ot_ip6[16];
typedef struct {
  ot_ip6 address;
  int    bits; /* prefix length for network matching */
} ot_net;
/* List of peers should fit in a single UDP packet (around 1200 bytes) */
#define OT_MAX_PEERS_UDP6 66
#define OT_MAX_PEERS_UDP4 200

#define OT_IP_SIZE6 16
#define OT_IP_SIZE4 4
#define OT_PORT_SIZE 2
#define OT_FLAG_SIZE 1
#define OT_TIME_SIZE 1
/* Some tracker behaviour tunable */
#define OT_CLIENT_TIMEOUT 30
#define OT_CLIENT_TIMEOUT_CHECKINTERVAL 10
#define OT_CLIENT_TIMEOUT_SEND (60 * 15)
#define OT_CLIENT_REQUEST_INTERVAL (60 * 30)
#define OT_CLIENT_REQUEST_VARIATION (60 * 6)

#define OT_TORRENT_TIMEOUT_HOURS 24
#define OT_TORRENT_TIMEOUT (60 * OT_TORRENT_TIMEOUT_HOURS)

/* Announce interval jittered by up to OT_CLIENT_REQUEST_VARIATION seconds;
   expands nrand48(ws->rand48_state), so `ws` must be in scope at use site */
#define OT_CLIENT_REQUEST_INTERVAL_RANDOM \
  (OT_CLIENT_REQUEST_INTERVAL - OT_CLIENT_REQUEST_VARIATION / 2 + (int)(nrand48(ws->rand48_state) % OT_CLIENT_REQUEST_VARIATION))
/* If WANT_MODEST_FULLSCRAPES is on, ip addresses may not
fullscrape more frequently than this amount in seconds */
#define OT_MODEST_PEER_TIMEOUT (60*5)
#define OT_MODEST_PEER_TIMEOUT (60 * 5)
/* If peers come back before 10 minutes, don't live sync them */
#define OT_CLIENT_SYNC_RENEW_BOUNDARY 10
/* Number of tracker admin ip addresses allowed */
#define OT_ADMINIP_MAX 64
#define OT_MAX_THREADS 64
#define OT_ADMINIP_MAX 64
#define OT_MAX_THREADS 64
#define OT_PEER_TIMEOUT 45
/* Number of minutes after announce before peer is removed */
#define OT_PEER_TIMEOUT 45
/* We maintain a list of 1024 pointers to sorted list of ot_torrent structs
Sort key is, of course, its hash */
#define OT_BUCKET_COUNT_BITS 10
#define OT_BUCKET_COUNT_BITS 10
#define OT_BUCKET_COUNT (1<<OT_BUCKET_COUNT_BITS)
#define OT_BUCKET_COUNT_SHIFT (32-OT_BUCKET_COUNT_BITS)
#define OT_BUCKET_COUNT (1 << OT_BUCKET_COUNT_BITS)
#define OT_BUCKET_COUNT_SHIFT (32 - OT_BUCKET_COUNT_BITS)
/* if _DEBUG_RANDOMTORRENTS is set, this is the amount of torrents to create
on startup */
#define RANDOMTORRENTS (1024 * 1024 * 1)
/* From opentracker.c */
extern time_t g_now_seconds;
extern time_t g_now_seconds;
extern volatile int g_opentracker_running;
#define g_now_minutes (g_now_seconds/60)
#define g_now_minutes (g_now_seconds / 60)
extern uint32_t g_tracker_id;
typedef enum { FLAG_TCP, FLAG_UDP, FLAG_MCA, FLAG_SELFPIPE } PROTO_FLAG;
/* On-wire/in-memory peer record layout. The "compare" prefix (address +
   port) is the sort/lookup key; time and flag bytes trail it. */
#define OT_PEER_COMPARE_SIZE6 ((OT_IP_SIZE6) + (OT_PORT_SIZE))
#define OT_PEER_COMPARE_SIZE4 ((OT_IP_SIZE4) + (OT_PORT_SIZE))
#define OT_PEER_COMPARE_SIZE_FROM_PEER_SIZE(PEER_SIZE) ((PEER_SIZE) - (OT_TIME_SIZE) - (OT_FLAG_SIZE))

#define OT_PEER_SIZE6 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE6))
#define OT_PEER_SIZE4 ((OT_TIME_SIZE) + (OT_FLAG_SIZE) + (OT_PEER_COMPARE_SIZE4))

typedef uint8_t ot_peer; /* Generic pointer to a v6 or v4 peer */
typedef uint8_t ot_peer6[OT_PEER_SIZE6];
typedef uint8_t ot_peer4[OT_PEER_SIZE4];
static const uint8_t PEER_FLAG_SEEDING   = 0x80;
static const uint8_t PEER_FLAG_COMPLETED = 0x40;
static const uint8_t PEER_FLAG_STOPPED   = 0x20;
static const uint8_t PEER_FLAG_FROM_SYNC = 0x10;
static const uint8_t PEER_FLAG_LEECHING  = 0x00;

/* Takes an ot_peer6 and returns the proper pointer to the peer and sets peer_size */
ot_peer *peer_from_peer6(ot_peer6 *peer, size_t *peer_size);
size_t   peer_size_from_peer6(ot_peer6 *peer);

/* Accessors for a v6-sized record (OT_SETIP/OT_SETPORT/OT_PEERFLAG) and
   dynamically-sized records (the _D / PEERTIME variants take peersize) */
#define OT_SETIP(peer, ip) memcpy((uint8_t *)(peer), (ip), OT_IP_SIZE6)
#define OT_SETPORT(peer, port) memcpy(((uint8_t *)(peer)) + (OT_IP_SIZE6), (port), 2)
#define OT_PEERFLAG(peer) (((uint8_t *)(peer))[(OT_IP_SIZE6) + 2])
#define OT_PEERFLAG_D(peer, peersize) (((uint8_t *)(peer))[(peersize) - 2])
#define OT_PEERTIME(peer, peersize) (((uint8_t *)(peer))[(peersize) - 1])

#define PEERS_BENCODED6 "6:peers6"
#define PEERS_BENCODED4 "5:peers"

#define OT_HASH_COMPARE_SIZE (sizeof(ot_hash))
struct ot_peerlist;
typedef struct ot_peerlist ot_peerlist;
/* A tracked torrent: its info hash plus separate peer lists per family */
typedef struct {
  ot_hash      hash;
  ot_peerlist *peer_list6;
  ot_peerlist *peer_list4;
} ot_torrent;
#include "ot_vector.h"
/* Per-family peer bookkeeping for one torrent */
struct ot_peerlist {
  ot_time base;       /* timestamp base for peer age accounting */
  size_t  seed_count;
  size_t  peer_count;
  size_t  down_count; /* completed downloads seen */
  /* normal peers vector or
     pointer to ot_vector[32] buckets if data != NULL and space == 0
  */
  ot_vector peers;
};
#define OT_PEERLIST_HASBUCKETS(peer_list) ((peer_list)->peers.size > (peer_list)->peers.space)
struct ot_workstruct {
/* Thread specific, static */
char *inbuf;
#define G_INBUF_SIZE 8192
char *outbuf;
#define G_OUTBUF_SIZE 8192
#ifdef _DEBUG_HTTPERROR
char *debugbuf;
#define G_DEBUGBUF_SIZE 8192
char *inbuf;
#define G_INBUF_SIZE 8192
char *outbuf;
#define G_OUTBUF_SIZE 8192
#ifdef _DEBUG_HTTPERROR
char *debugbuf;
#define G_DEBUGBUF_SIZE 8192
#endif
/* The peer currently in the working */
ot_peer peer;
ot_peer6 peer; /* Can fit v6 and v4 peers */
/* Pointers into the request buffer */
ot_hash *hash;
@ -155,31 +177,34 @@ struct ot_workstruct {
#endif
#ifdef WANT_SYNC
#define WANT_SYNC_PARAM( param ) , param
#define WANT_SYNC_PARAM(param) , param
#else
#define WANT_SYNC_PARAM( param )
#define WANT_SYNC_PARAM(param)
#endif
#ifdef WANT_LOG_NETWORKS
#error Live logging networks disabled at the moment.
#endif
void trackerlogic_init( void );
void trackerlogic_deinit( void );
void exerr( char * message );
void trackerlogic_init(void);
void trackerlogic_deinit(void);
void exerr(char *message);
/* add_peer_to_torrent does only release the torrent bucket if from_sync is set,
otherwise it is released in return_peers_for_torrent */
size_t add_peer_to_torrent_and_return_peers( PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount );
size_t remove_peer_from_torrent( PROTO_FLAG proto, struct ot_workstruct *ws );
size_t return_tcp_scrape_for_torrent( ot_hash *hash, int amount, char *reply );
size_t return_udp_scrape_for_torrent( ot_hash hash, char *reply );
void add_torrent_from_saved_state( ot_hash hash, ot_time base, size_t down_count );
size_t add_peer_to_torrent_and_return_peers(PROTO_FLAG proto, struct ot_workstruct *ws, size_t amount);
size_t remove_peer_from_torrent(PROTO_FLAG proto, struct ot_workstruct *ws);
size_t return_tcp_scrape_for_torrent(ot_hash const *hash_list, int amount, char *reply);
size_t return_udp_scrape_for_torrent(ot_hash const hash, char *reply);
void add_torrent_from_saved_state(ot_hash const hash, ot_time base, size_t down_count);
#ifdef _DEBUG_RANDOMTORRENTS
void trackerlogic_add_random_torrents(size_t amount);
#endif
/* torrent iterator */
void iterate_all_torrents( int (*for_each)( ot_torrent* torrent, uintptr_t data ), uintptr_t data );
void iterate_all_torrents(int (*for_each)(ot_torrent *torrent, uintptr_t data), uintptr_t data);
/* Helper, before it moves to its own object */
void free_peerlist( ot_peerlist *peer_list );
void free_peerlist(ot_peerlist *peer_list);
#endif