Compare commits

..

No commits in common. "main" and "v1.0.0" have entirely different histories.
main ... v1.0.0

132 changed files with 5037 additions and 11348 deletions

3
.github/FUNDING.yml vendored
View file

@ -1,3 +0,0 @@
---
github:
- "jzelinskie"

View file

@ -1,23 +0,0 @@
---
version: 2
updates:
- package-ecosystem: "github-actions"
directory: "/"
schedule:
interval: "monthly"
labels:
- "component/dependencies"
- package-ecosystem: "gomod"
directory: "/"
schedule:
interval: "monthly"
labels:
- "component/dependencies"
- package-ecosystem: "docker"
directory: "/"
schedule:
interval: "monthly"
labels:
- "component/dependencies"

View file

@ -1,112 +0,0 @@
---
name: "Build & Test"
on:
push:
branches:
- "!dependabot/*"
- "main"
pull_request:
branches: ["*"]
jobs:
build:
name: "Go Build"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Build"
run: "go build ./cmd/..."
unit:
name: "Run Unit Tests"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Run `go test`"
run: "go test -race ./..."
e2e-mem:
name: "E2E Memory Tests"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Install and configure chihaya"
run: |
go install ./cmd/chihaya
cat ./dist/example_config.yaml
- name: "Run end-to-end tests"
run: |
chihaya --config=./dist/example_config.yaml --debug &
pid=$!
sleep 2
chihaya e2e --debug
kill $pid
e2e-redis:
name: "E2E Redis Tests"
runs-on: "ubuntu-latest"
services:
redis:
image: "redis"
ports: ["6379:6379"]
options: "--entrypoint redis-server"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Install and configure chihaya"
run: |
go install ./cmd/chihaya
curl -LO https://github.com/jzelinskie/faq/releases/download/0.0.6/faq-linux-amd64
chmod +x faq-linux-amd64
./faq-linux-amd64 '.chihaya.storage = {"config":{"gc_interval":"3m","peer_lifetime":"31m","prometheus_reporting_interval":"1s","redis_broker":"redis://127.0.0.1:6379/0","redis_connect_timeout":"15s","redis_read_timeout":"15s","redis_write_timeout":"15s"},"name":"redis"}' ./dist/example_config.yaml > ./dist/example_redis_config.yaml
cat ./dist/example_redis_config.yaml
- name: "Run end-to-end tests"
run: |
chihaya --config=./dist/example_redis_config.yaml --debug &
pid=$!
sleep 2
chihaya e2e --debug
kill $pid
image-build:
name: "Docker Build"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "docker/setup-qemu-action@v1"
- uses: "docker/setup-buildx-action@v1"
with:
driver-opts: "image=moby/buildkit:master"
- uses: "docker/build-push-action@v1"
with:
push: false
tags: "latest"
helm:
name: "Helm Template"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- name: "Install Helm"
uses: "engineerd/configurator@v0.0.5"
with:
name: "helm"
pathInArchive: "linux-amd64/helm"
fromGitHubReleases: true
repo: "helm/helm"
version: "^v3"
urlTemplate: "https://get.helm.sh/helm-{{version}}-linux-amd64.tar.gz"
token: "${{ secrets.GITHUB_TOKEN }}"
- name: "Run `helm template`"
working-directory: "./dist/helm/chihaya"
run: "helm template . --debug"

View file

@ -1,86 +0,0 @@
---
name: "Lint"
on:
push:
branches:
- "!dependabot/*"
- "main"
pull_request:
branches: ["*"]
jobs:
go-mod-tidy:
name: "Lint Go Modules"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Run `go mod tidy`"
run: "go mod tidy && bash -c '[ $(git status --porcelain | tee /dev/fd/2 | wc -c) -eq 0 ]'"
go-fmt:
name: "Format Go"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- name: "Install gofumpt"
run: "go install mvdan.cc/gofumpt@latest"
- name: "Run `gofumpt`"
run: |
GOFUMPT_OUTPUT="$(find . -iname '*.go' -type f | xargs gofumpt -d)"
if [ -n "$GOFUMPT_OUTPUT" ]; then
echo "The following files are not correctly formatted:"
echo "${GOFUMPT_OUTPUT}"
exit 1
fi
go-lint:
name: "Lint Go"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "actions/setup-go@v2"
with:
go-version: "^1.17"
- uses: "golangci/golangci-lint-action@v2"
with:
version: "v1.43"
skip-go-installation: true
skip-pkg-cache: true
skip-build-cache: false
extra-lint:
name: "Lint YAML & Markdown"
runs-on: "ubuntu-latest"
steps:
- uses: "actions/checkout@v2"
- uses: "bewuethr/yamllint-action@v1.1.1"
with:
config-file: ".yamllint"
- uses: "nosborn/github-action-markdown-cli@v2.0.0"
with:
files: "."
config_file: ".markdownlint.yaml"
codeql:
name: "Analyze with CodeQL"
runs-on: "ubuntu-latest"
permissions:
actions: "read"
contents: "read"
security-events: "write"
strategy:
fail-fast: false
matrix:
language: ["go"]
steps:
- uses: "actions/checkout@v2"
- uses: "github/codeql-action/init@v1"
with:
languages: "${{ matrix.language }}"
- uses: "github/codeql-action/autobuild@v1"
- uses: "github/codeql-action/analyze@v1"

3
.gitignore vendored Normal file
View file

@ -0,0 +1,3 @@
/config.json
/chihaya
/Godeps/_workspace

View file

@ -1,50 +0,0 @@
---
run:
timeout: "5m"
output:
sort-results: true
linters-settings:
goimports:
local-prefixes: "github.com/chihaya/chihaya"
gosec:
excludes:
- "G404" # Allow the usage of math/rand
linters:
enable:
- "bidichk"
- "bodyclose"
- "deadcode"
- "errcheck"
- "errname"
- "errorlint"
- "gofumpt"
- "goimports"
- "goprintffuncname"
- "gosec"
- "gosimple"
- "govet"
- "ifshort"
- "importas"
- "ineffassign"
- "makezero"
- "prealloc"
- "predeclared"
- "revive"
- "rowserrcheck"
- "staticcheck"
- "structcheck"
- "stylecheck"
- "tenv"
- "typecheck"
- "unconvert"
- "unused"
- "varcheck"
- "wastedassign"
- "whitespace"
issues:
include:
- "EXC0012" # Exported should have comment
- "EXC0012" # Exported should have comment
- "EXC0013" # Package comment should be of form
- "EXC0014" # Comment on exported should be of form
- "EXC0015" # Should have a package comment

View file

@ -1,3 +0,0 @@
---
line-length: false
no-hard-tabs: false

24
.travis.yml Normal file
View file

@ -0,0 +1,24 @@
language: go
go:
- 1.5
- tip
sudo: false
before_install:
- go get github.com/tools/godep
- godep restore
script:
- go test -v ./...
notifications:
irc:
channels:
- "irc.freenode.net#chihaya"
use_notice: true
skip_join: true
on_success: always
on_failure: always
email: false

View file

@ -1,11 +0,0 @@
# vim: ft=yaml
---
yaml-files:
- "*.yaml"
- "*.yml"
- ".yamllint"
ignore: "dist/helm/"
extends: "default"
rules:
quoted-strings: "enable"
line-length: "disable"

5
AUTHORS Normal file
View file

@ -0,0 +1,5 @@
# This is the official list of Chihaya authors for copyright purposes, in alphabetical order.
Jimmy Zelinskie <jimmyzelinskie@gmail.com>
Justin Li <jli@j-li.net>

237
CONFIGURATION.md Normal file
View file

@ -0,0 +1,237 @@
# Configuration
Chihaya's behaviour is customized by setting up a JSON configuration file.
Available keys are as follows:
##### `httpListenAddr`
type: string
default: "localhost:6881"
The listen address for the HTTP server. If only a port is specified, the tracker will listen on all interfaces. If left empty, the tracker will not run a HTTP endpoint.
##### `httpRequestTimeout`
type: duration
default: "4s"
The duration to allow outstanding requests to survive before forcefully terminating them.
##### `httpReadTimeout`
type: duration
default: "4s"
The maximum duration before timing out read of the request.
##### `httpWriteTimeout`
type: duration
default: "4s"
The maximum duration before timing out write of the request.
##### `httpListenLimit`
type: integer
default: 0
Limits the number of outstanding requests. Set to `0` to disable.
##### `udpListenAddr`
type: string
default: "localhost:6881"
Then listen address for the UDP server. If only a port is specified, the tracker will listen on all interfaces. If left empty, the tracker will not run a UDP endpoint.
##### `createOnAnnounce`
type: bool
default: true
Whether to register new torrents with the tracker when any client announces (`true`), or to return an error if the torrent doesn't exist (`false`).
##### `purgeInactiveTorrents`
type: bool
default: true
If torrents should be forgotten when there are no active peers.
##### `announce`
type: duration
default: "30m"
The announce `interval` value sent to clients. This specifies how long clients should wait between regular announces.
##### `minAnnounce`
type: duration
default: "30m"
The announce `min_interval` value sent to clients. This theoretically specifies the minimum allowed time between announces, but most clients don't really respect it.
##### `defaultNumWant`
type: integer
default: 50
The default maximum number of peers to return if the client has not requested a specific number.
##### `allowIPSpoofing`
type: bool
default: true
Whether peers are allowed to set their own IP via the various supported methods or if these are ignored. This must be enabled for dual-stack IP support, since there is no other way to determine both IPs of a peer otherwise.
##### `dualStackedPeers`
type: bool
default: true
True if peers may have both an IPv4 and IPv6 address, otherwise only one IP per peer will be used.
##### `realIPHeader`
type: string
default: blank
An optional HTTP header indicating the upstream IP, for example `X-Forwarded-For` or `X-Real-IP`. Use this when running the tracker behind a reverse proxy.
##### `respectAF`
type: bool
default: false
Whether responses should only include peers of the same address family as the announcing peer, or if peers of any family may be returned (i.e. both IPv4 and IPv6).
##### `clientWhitelistEnabled`
type: bool
default: false
Enables the peer ID whitelist.
##### `clientWhitelist`
type: array of strings
default: []
List of peer ID prefixes to allow if `client_whitelist_enabled` is set to true.
##### `torrentMapShards`
type: integer
default: 1
Number of internal torrent maps to use. Leave this at 1 in general, however it can potentially improve performance when there are many unique torrents and few peers per torrent.
##### `reapInterval`
type: duration
default: "60s"
Interval at which a search for inactive peers should be performed.
##### `reapRatio`
type: float64
default: 1.25
Peers will be rated inactive if they haven't announced for `reapRatio * minAnnounce`.
##### `apiListenAddr`
type: string
default: "localhost:6880"
The listen address for the HTTP API. If only a port is specified, the tracker will listen on all interfaces. If left empty, the tracker will not run the HTTP API.
##### `apiRequestTimeout`
type: duration
default: "4s"
The duration to allow outstanding requests to survive before forcefully terminating them.
##### `apiReadTimeout`
type: duration
default: "4s"
The maximum duration before timing out read of the request.
##### `apiWriteTimeout`
type: duration
default: "4s"
The maximum duration before timing out write of the request.
##### `apiListenLimit`
type: integer
default: 0
Limits the number of outstanding requests. Set to `0` to disable.
##### `driver`
type: string
default: "noop"
Sets the backend driver to load. The included `"noop"` driver provides no functionality.
##### `statsBufferSize`
type: integer
default: 0
The size of the event-queues for statistics.
##### `includeMemStats`
type: bool
default: true
Whether to include information about memory in the statistics.
##### `verboseMemStats`
type: bool
default: false
Whether the information about memory should be verbose.
##### `memStatsInterval`
type: duration
default: "5s"
Interval at which to collect statistics about memory.
##### `jwkSetURI`
type: string
default: ""
If this string is not empty, then the tracker will attempt to use JWTs to validate infohashes before announces. The format for the JSON at this endpoint can be found at [the RFC for JWKs](https://tools.ietf.org/html/draft-ietf-jose-json-web-key-41#page-10) with the addition of an "issuer" key. Simply stated, this feature requires two fields at this JSON endpoint: "keys" and "issuer". "keys" is a list of JWKs that can be used to validate JWTs and "issuer" should match the "iss" claim in the JWT. The lifetime of a JWK is based upon standard HTTP caching headers and falls back to 5 minutes if no cache headers are provided.
#### `jwkSetUpdateInterval`
type: duration
default: "5m"
The interval at which keys are updated from JWKSetURI. Because the fallback lifetime for keys without cache headers is 5 minutes, this value should never be below 5 minutes unless you know your jwkSetURI has caching headers.
#### `jwtAudience`
type: string
default: ""
The audience claim that is used to validate JWTs.

View file

@ -1,3 +1,77 @@
## Contributing to LBRY
## Communication
https://lbry.tech/contribute
Currently, real time conversation happens on [#chihaya] on [freenode].
We are currently attempting to have more information available on GitHub.
[#chihaya]: http://webchat.freenode.net?channels=chihaya
[freenode]: http://freenode.net
## Pull request procedure
Please don't write massive patches without prior communication, as it will most
likely lead to confusion and time wasted for everyone. However, small
unannounced fixes are always welcome!
Pull requests will be treated as "review requests", and we will give
feedback we expect to see corrected on [style] and substance before merging.
Changes contributed via pull request should focus on a single issue at a time,
like any other. We will not accept pull-requests that try to "sneak" unrelated
changes in.
The average contribution flow is as follows:
- Create a topic branch from where you want to base your work. This is usually `master`.
- Make commits of logical units.
- Make sure your commit messages are in the [proper format]
- Push your changes to a topic branch in your fork of the repository.
- Submit a pull request.
- Your PR will be reviewed and merged by one of the maintainers.
Any new files should include the license header found at the top of every
source file.
[style]: https://github.com/chihaya/chihaya/blob/master/CONTRIBUTING.md#style
[proper format]: https://github.com/chihaya/chihaya/blob/master/CONTRIBUTING.md#commit-messages
## Style
### Go
The project follows idiomatic [Go conventions] for style. If you're just
starting out writing Go, you can check out this [meta-package] that documents
style idiomatic style decisions you will find in open source Go code.
[Go conventions]: https://github.com/golang/go/wiki/CodeReviewComments
[meta-package]: https://github.com/jzelinskie/conventions
### Commit Messages
We follow a rough convention for commit messages that is designed to answer two
questions: what changed and why. The subject line should feature the what and
the body of the commit should describe the why.
```
scripts: add the test-cluster command
this uses tmux to setup a test cluster that you can easily kill and
start for debugging.
Fixes #38
```
The format can be described more formally as follows:
```
<subsystem>: <what changed>
<BLANK LINE>
<why this change was made>
<BLANK LINE>
<footer>
```
The first line is the subject and should be no longer than 70 characters, the
second line is always blank, and other lines should be wrapped at 80 characters.
This allows the message to be easier to read on GitHub as well as in various
git tools.

View file

@ -1,26 +1,33 @@
FROM golang:alpine AS build-env
LABEL maintainer "Jimmy Zelinskie <jimmyzelinskie+git@gmail.com>"
# vim: ft=dockerfile
FROM golang
MAINTAINER Jimmy Zelinskie <jimmyzelinskie@gmail.com>
# Install OS-level dependencies.
RUN apk add --no-cache curl git
# Add files
WORKDIR /go/src/github.com/chihaya/chihaya/
RUN mkdir -p /go/src/github.com/chihaya/chihaya/
# Copy our source code into the container.
WORKDIR /go/src/github.com/chihaya/chihaya
COPY . /go/src/github.com/chihaya/chihaya
# Dependencies
RUN go get github.com/tools/godep
ADD Godeps /go/src/github.com/chihaya/chihaya/Godeps
RUN godep restore
# Install our golang dependencies and compile our binary.
RUN CGO_ENABLED=0 go install ./cmd/chihaya
# Add source
ADD *.go /go/src/github.com/chihaya/chihaya/
ADD api /go/src/github.com/chihaya/chihaya/api
ADD cmd /go/src/github.com/chihaya/chihaya/cmd
ADD config /go/src/github.com/chihaya/chihaya/config
ADD http /go/src/github.com/chihaya/chihaya/http
ADD stats /go/src/github.com/chihaya/chihaya/stats
ADD tracker /go/src/github.com/chihaya/chihaya/tracker
ADD udp /go/src/github.com/chihaya/chihaya/udp
FROM alpine:latest
RUN apk add --no-cache ca-certificates
COPY --from=build-env /go/bin/chihaya /chihaya
# Install
RUN go install github.com/chihaya/chihaya/cmd/chihaya
RUN adduser -D chihaya
# Configuration/environment
VOLUME ["/config"]
EXPOSE 6880-6882
# Expose a docker interface to our binary.
EXPOSE 6880 6969
# Drop root privileges
USER chihaya
ENTRYPOINT ["/chihaya"]
# docker run -p 6880-6882:6880-6882 -v $PATH_TO_DIR_WITH_CONF_FILE:/config:ro -e quay.io/jzelinskie/chihaya:latest -v=5
ENTRYPOINT ["chihaya", "-config=/config/config.json", "-logtostderr=true"]
CMD ["-v=5"]

57
Godeps/Godeps.json generated Normal file
View file

@ -0,0 +1,57 @@
{
"ImportPath": "github.com/chihaya/chihaya",
"GoVersion": "go1.5.1",
"Deps": [
{
"ImportPath": "github.com/chihaya/bencode",
"Rev": "3c485a8d166ff6a79baba90c2c2da01c8348e930"
},
{
"ImportPath": "github.com/coreos/go-oidc/http",
"Rev": "ec2746d2ccb220e81c41b0b0cb2d4a1cc23f7950"
},
{
"ImportPath": "github.com/coreos/go-oidc/jose",
"Rev": "ec2746d2ccb220e81c41b0b0cb2d4a1cc23f7950"
},
{
"ImportPath": "github.com/coreos/go-systemd/journal",
"Comment": "v4-36-gdd4f6b8",
"Rev": "dd4f6b87c2a80813d1a01790344322da19ff195e"
},
{
"ImportPath": "github.com/coreos/pkg/capnslog",
"Rev": "2c77715c4df99b5420ffcae14ead08f52104065d"
},
{
"ImportPath": "github.com/golang/glog",
"Rev": "fca8c8854093a154ff1eb580aae10276ad6b1b5f"
},
{
"ImportPath": "github.com/julienschmidt/httprouter",
"Comment": "v1.1-14-g21439ef",
"Rev": "21439ef4d70ba4f3e2a5ed9249e7b03af4019b40"
},
{
"ImportPath": "github.com/pushrax/bufferpool",
"Rev": "7d6e1653dee10a165d1f357f3a57bc8031e9621b"
},
{
"ImportPath": "github.com/pushrax/faststats",
"Rev": "0fc2c5e41a187240ffaa09320eea7df9f8071388"
},
{
"ImportPath": "github.com/pushrax/flatjson",
"Rev": "86044f1c998d49053e13293029414ddb63f3a422"
},
{
"ImportPath": "github.com/tylerb/graceful",
"Comment": "v1.2.3",
"Rev": "48afeb21e2fcbcff0f30bd5ad6b97747b0fae38e"
},
{
"ImportPath": "golang.org/x/net/netutil",
"Rev": "520af5de654dc4dd4f0f65aa40e66dbbd9043df1"
}
]
}

5
Godeps/Readme generated Normal file
View file

@ -0,0 +1,5 @@
This directory tree is generated automatically by godep.
Please do not edit.
See https://github.com/tools/godep for more information.

18
LICENSE
View file

@ -1,21 +1,3 @@
The MIT License (MIT)
Copyright (c) 2015-2022 LBRY Inc
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the
following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Chihaya is released under a BSD 2-Clause license, reproduced below.
Copyright (c) 2015, The Chihaya Authors

View file

@ -1,3 +0,0 @@
Jimmy Zelinskie <jimmyzelinskie@gmail.com> (@jzelinskie) pkg:*
Justin Li <jli@j-li.net> (@pushrax) pkg:*
Leo Balduf <balduf@hm.edu> (@mrd0ll4r) pkg:*

185
README.md
View file

@ -1,146 +1,99 @@
# LBRY Tracker
# Chihaya
The LBRY tracker is a server that helps peers find each other. It was forked from [Chihaya](https://github.com/chihaya/chihaya), an open-source [BitTorrent tracker](https://en.wikipedia.org/wiki/BitTorrent_tracker).
[![GoDoc](https://godoc.org/github.com/chihaya/chihaya?status.svg)](https://godoc.org/github.com/chihaya/chihaya)
[![License](https://img.shields.io/badge/license-BSD-blue.svg)](https://en.wikipedia.org/wiki/BSD_licenses#2-clause_license_.28.22Simplified_BSD_License.22_or_.22FreeBSD_License.22.29)
[![Build Status](https://api.travis-ci.org/chihaya/chihaya.svg?branch=master)](https://travis-ci.org/chihaya/chihaya)
[![Docker Repository on Quay.io](https://quay.io/repository/jzelinskie/chihaya/status "Docker Repository on Quay.io")](https://quay.io/repository/jzelinskie/chihaya)
Chihaya is a high-performance [BitTorrent tracker] written in the Go
programming language. It is still heavily under development and the current
`master` branch should probably not be used in production
(unless you know what you're doing).
## Installation and Usage
Current features include:
### Building from HEAD
- Public tracker feature-set with full compatibility with what exists of the BitTorrent spec
- Low resource consumption, and fast, asynchronous request processing
- Full IPv6 support, including handling for dual-stacked peers
- Extensive metrics for visibility into the tracker and swarm's performance
- Ability to prioritize peers in local subnets to reduce backbone contention
- JWT Validation to approve the usage of a given infohash.
In order to compile the project, the [latest stable version of Go] and knowledge of a [working Go environment] are required.
Planned features include:
```sh
git clone git@github.com:lbryio/tracker.git
cd tracker
go build ./cmd/chihaya
./chihaya --help
```
- Private tracker feature-set with compatibility for a [Gazelle]-like deployment (WIP)
[latest stable version of Go]: https://golang.org/dl
[working Go environment]: https://golang.org/doc/code.html
[BitTorrent tracker]: http://en.wikipedia.org/wiki/BitTorrent_tracker
[gazelle]: https://github.com/whatcd/gazelle
### Testing
## When would I use Chihaya?
The following will run all tests and benchmarks.
Removing `-bench` will just run unit tests.
Chihaya is a eventually meant for every kind of BitTorrent tracker deployment.
Chihaya has been used to replace instances of [opentracker] and also instances of [ocelot].
Chihaya handles torrent announces and scrapes in memory.
However, using a backend driver, Chihaya can also asynchronously provide deltas to maintain a set of persistent data without throttling a database.
This is particularly useful behavior for private tracker use-cases.
```sh
go test -bench $(go list ./...)
```
[opentracker]: http://erdgeist.org/arts/software/opentracker
[ocelot]: https://github.com/WhatCD/Ocelot
The tracker executable contains a command to end-to-end test a BitTorrent tracker.
See
```sh
tracker --help
```
## Running Chihaya
### Configuration
Configuration of the tracker is done via one YAML configuration file.
The `dist/` directory contains an example configuration file.
Files and directories under `docs/` contain detailed information about configuring middleware, storage implementations, architecture etc.
Copy [`example_config.json`] to your choice of location, and update the values as required.
An explanation of the available keys can be found in [CONFIGURATION.md].
This is an example for an UDP server running on 9252 with metrics enabled. Remember to **change the private key** to some random string.
[`example_config.json`]: https://github.com/chihaya/chihaya/blob/master/example_config.json
[CONFIGURATION.md]: https://github.com/chihaya/chihaya/blob/master/CONFIGURATION.md
```
---
chihaya:
announce_interval: "30m"
min_announce_interval: "15m"
metrics_addr: "0.0.0.0:6880"
udp:
addr: "0.0.0.0:9252"
max_clock_skew: "10s"
private_key: ">>>>CHANGE THIS TO SOME RANDOM THING<<<<"
enable_request_timing: false
allow_ip_spoofing: false
max_numwant: 100
default_numwant: 50
max_scrape_infohashes: 50
storage:
name: "memory"
config:
gc_interval: "3m"
peer_lifetime: "31m"
shard_count: 1024
prometheus_reporting_interval: "1s"
### Docker
```sh
$ docker pull quay.io/jzelinskie/chihaya:latest
$ export CHIHAYA_LOG_LEVEL=5 # most verbose, and the default
$ docker run -p 6880-6882:6880-6882 -v $PATH_TO_DIR_WITH_CONF_FILE:/config:ro -e quay.io/jzelinskie/chihaya:latest -v=$CHIHAYA_LOG_LEVEL
```
# Running from Docker
## Developing Chihaya
This section assumes `docker` and `docker-compose` to be installed on a Linux distro. Please check official docs on how to install [Docker Engine](https://docs.docker.com/engine/install/) and [Docker Compose](https://docs.docker.com/compose/install/).
### Building & Installing
## Docker Compose from lbry/tracker
In order to define a tracker service and let Docker Compose manage it, create a file named `docker-compose.yml` with:
```
version: "3"
services:
tracker:
image: lbry/tracker
command: --config /config/conf.yml
volumes:
- .:/config
network_mode: host
restart: always
```
Unfortunately the tracker does not work without `network_mode: host` due some bug with UDP on Docker. In this mode, firewall configuration needs to be done manually. If using `ufw`, try `ufw allow 9252`.
Chihaya requires Go 1.5+ (preferrably the latest stable Go), [Godep], and a [Go environment] previously set up.
Now, move the configuration to the same directory as `docker-compose.yml`, naming it `conf.yml`. If it is not ready, check the configuration section above.
[Godep]: https://github.com/tools/godep
[Go environment]: https://golang.org/doc/code.html
Start the tracker by running the following in the same directory as the compose file:
`docker-compose up -d`
Logs can be read with:
`docker-compose logs`
To stop:
`docker-compose down`
## Building the containter
A Dockerfile is provided within the repo. To build the container locally, run this command on the same directory the repo was cloned:
`sudo docker build -f Dockerfile . -t some_name/tracker:latest`
It will produce an image called `some_name/tracker`, which can be used in the Docker Compose section.
# Running from source as a service
For ease of maintenance, it is recommended to run the tracker as a service.
This is an example for running it under as the current user using `systemd`:
```
[Unit]
Description=Chihaya BT tracker
After=network.target
[Service]
Type=simple
#User=chihaya
#Group=chihaya
WorkingDirectory=/home/user/github/tracker
ExecStart=/home/user/github/tracker/chihaya --config dist/example_config.yaml
Restart=on-failure
[Install]
WantedBy=multi-user.target
```sh
$ export GOPATH=$PWD/chihaya
$ git clone https://github.com/chihaya/chihaya.git chihaya/src/github.com/chihaya/chihaya
$ cd chihaya/src/github.com/chihaya/chihaya/cmd/chihaya/
$ godep restore
$ go install github.com/chihaya/chihaya/cmd/chihaya
```
To try it, change `/home/user/github/tracker` to where the code was cloned and run:
```bash=
mkdir -p ~/.config/systemd/user
# PASTE FILE IN ~/.config/systemd/user/tracker.service
systemctl --user enable tracker
systemctl --user start tracker
systemctl --user status tracker
### Testing
Chihaya has end-to-end test coverage for announces in addition to unit tests for isolated components.
To run the tests, use:
```sh
$ cd $GOPATH/src/github.com/chihaya/chihaya
$ godep go test -v ./...
```
## Contributing
There is also a set of benchmarks for performance-critical sections of Chihaya.
These can be run similarly:
Contributions to this project are welcome, encouraged, and compensated. For more details, please check [this](https://lbry.tech/contribute) link.
```sh
$ cd $GOPATH/src/github.com/chihaya/chihaya
$ godep go test -v ./... -bench .
```
## License
### Contributing
LBRY's code changes are MIT licensed, and the upstream Chihaya code is licensed under a BSD 2-Clause license. For the full license, see [LICENSE](LICENSE).
See [CONTRIBUTING.md] for guidelines to contributing to the project.
Feel free to make issues or ask questions.
Our maintainers are also always idle in #chihaya on freenode.
## Security
We take security seriously. Please contact security@lbry.com regarding any security issues. [Our PGP key is here](https://lbry.com/faq/pgp-key) if you need it.
## Contact
The primary contact for this project is [@shyba](mailto:vshyba@lbry.com).
[CONTRIBUTING.md]: https://github.com/chihaya/chihaya/blob/master/CONTRIBUTING.md

162
api/api.go Normal file
View file

@ -0,0 +1,162 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package api implements a RESTful HTTP JSON API server for a BitTorrent
// tracker.
package api
import (
"net"
"net/http"
"time"
"github.com/golang/glog"
"github.com/julienschmidt/httprouter"
"github.com/tylerb/graceful"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker"
)
// Server represents an API server for a torrent tracker.
type Server struct {
config *config.Config
tracker *tracker.Tracker
grace *graceful.Server
stopping bool
}
// NewServer returns a new API server for a given configuration and tracker
// instance.
func NewServer(cfg *config.Config, tkr *tracker.Tracker) *Server {
return &Server{
config: cfg,
tracker: tkr,
}
}
// Stop cleanly shuts down the server.
func (s *Server) Stop() {
if !s.stopping {
s.grace.Stop(s.grace.Timeout)
}
}
// Serve runs an API server, blocking until the server has shut down.
func (s *Server) Serve() {
glog.V(0).Info("Starting API on ", s.config.APIConfig.ListenAddr)
if s.config.APIConfig.ListenLimit != 0 {
glog.V(0).Info("Limiting connections to ", s.config.APIConfig.ListenLimit)
}
grace := &graceful.Server{
Timeout: s.config.APIConfig.RequestTimeout.Duration,
ConnState: s.connState,
ListenLimit: s.config.APIConfig.ListenLimit,
NoSignalHandling: true,
Server: &http.Server{
Addr: s.config.APIConfig.ListenAddr,
Handler: newRouter(s),
ReadTimeout: s.config.APIConfig.ReadTimeout.Duration,
WriteTimeout: s.config.APIConfig.WriteTimeout.Duration,
},
}
s.grace = grace
grace.SetKeepAlivesEnabled(false)
grace.ShutdownInitiated = func() { s.stopping = true }
if err := grace.ListenAndServe(); err != nil {
if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
glog.Errorf("Failed to gracefully run API server: %s", err.Error())
return
}
}
glog.Info("API server shut down cleanly")
}
// newRouter returns a router with all the routes.
func newRouter(s *Server) *httprouter.Router {
r := httprouter.New()
if s.config.ClientWhitelistEnabled {
r.GET("/clients/:clientID", makeHandler(s.getClient))
r.PUT("/clients/:clientID", makeHandler(s.putClient))
r.DELETE("/clients/:clientID", makeHandler(s.delClient))
}
r.GET("/torrents/:infohash", makeHandler(s.getTorrent))
r.PUT("/torrents/:infohash", makeHandler(s.putTorrent))
r.DELETE("/torrents/:infohash", makeHandler(s.delTorrent))
r.GET("/check", makeHandler(s.check))
r.GET("/stats", makeHandler(s.stats))
return r
}
// connState is used by graceful in order to gracefully shutdown. It also
// keeps track of connection stats.
func (s *Server) connState(conn net.Conn, state http.ConnState) {
switch state {
case http.StateNew:
stats.RecordEvent(stats.AcceptedConnection)
case http.StateClosed:
stats.RecordEvent(stats.ClosedConnection)
case http.StateHijacked:
panic("connection impossibly hijacked")
// Ignore the following cases.
case http.StateActive, http.StateIdle:
default:
glog.Errorf("Connection transitioned to unknown state %s (%d)", state, state)
}
}
// ResponseHandler is an HTTP handler that returns a status code.
type ResponseHandler func(http.ResponseWriter, *http.Request, httprouter.Params) (int, error)
// makeHandler wraps our ResponseHandlers while timing requests, collecting
// stats, logging, and handling errors.
func makeHandler(handler ResponseHandler) httprouter.Handle {
	return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
		start := time.Now()
		httpCode, err := handler(w, r, p)
		duration := time.Since(start)

		// msg is the body sent to the client: an internal error's text,
		// or the standard status text for any non-200 code.
		var msg string
		if err != nil {
			msg = err.Error()
		} else if httpCode != http.StatusOK {
			msg = http.StatusText(httpCode)
		}

		if len(msg) > 0 {
			http.Error(w, msg, httpCode)
			stats.RecordEvent(stats.ErroredRequest)
		}

		// Failures are always logged; successes only at verbosity >= 2,
		// with the full request URI (including query) at verbosity >= 3.
		if len(msg) > 0 || glog.V(2) {
			reqString := r.URL.Path + " " + r.RemoteAddr
			if glog.V(3) {
				reqString = r.URL.RequestURI() + " " + r.RemoteAddr
			}
			if len(msg) > 0 {
				glog.Errorf("[API - %9s] %s (%d - %s)", duration, reqString, httpCode, msg)
			} else {
				glog.Infof("[API - %9s] %s (%d)", duration, reqString, httpCode)
			}
		}

		stats.RecordEvent(stats.HandledRequest)
		stats.RecordTiming(stats.ResponseTime, duration)
	}
}

120
api/routes.go Normal file
View file

@ -0,0 +1,120 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package api
import (
"encoding/json"
"net/http"
"net/url"
"runtime"
"github.com/julienschmidt/httprouter"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker/models"
)
const jsonContentType = "application/json; charset=UTF-8"
// handleError converts an error returned by the tracker into an HTTP status
// code for the response.
//
// nil maps to 200 OK. models.NotFoundError maps to 404 and models.ClientError
// to 400; both are recorded as client-error events and return a nil error so
// that no message is sent to the client. Any other error becomes a 500 and is
// propagated to the caller for logging.
func handleError(err error) (int, error) {
	if err == nil {
		return http.StatusOK, nil
	} else if _, ok := err.(models.NotFoundError); ok {
		stats.RecordEvent(stats.ClientError)
		return http.StatusNotFound, nil
	} else if _, ok := err.(models.ClientError); ok {
		stats.RecordEvent(stats.ClientError)
		return http.StatusBadRequest, nil
	}
	return http.StatusInternalServerError, err
}
// check is a liveness probe; it writes a constant body so monitoring can
// verify the API server is responding.
func (s *Server) check(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
	_, err := w.Write([]byte("STILL-ALIVE"))
	return handleError(err)
}

// stats renders the collected server statistics as JSON.
//
// Two optional query parameters alter the output: "flatten" emits the
// flattened representation, and "pretty" indents the JSON. The goroutine
// count is sampled at request time.
func (s *Server) stats(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
	w.Header().Set("Content-Type", jsonContentType)
	var err error
	var val interface{}
	query := r.URL.Query()
	stats.DefaultStats.GoRoutines = runtime.NumGoroutine()
	if _, flatten := query["flatten"]; flatten {
		val = stats.DefaultStats.Flattened()
	} else {
		val = stats.DefaultStats
	}
	if _, pretty := query["pretty"]; pretty {
		var buf []byte
		buf, err = json.MarshalIndent(val, "", "  ")
		if err == nil {
			_, err = w.Write(buf)
		}
	} else {
		err = json.NewEncoder(w).Encode(val)
	}
	return handleError(err)
}
// getTorrent writes the JSON representation of the torrent identified by the
// URL-escaped infohash path parameter. Responds 404 when the infohash cannot
// be unescaped or (via handleError) when the torrent is unknown.
func (s *Server) getTorrent(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
	infohash, err := url.QueryUnescape(p.ByName("infohash"))
	if err != nil {
		return http.StatusNotFound, err
	}
	torrent, err := s.tracker.FindTorrent(infohash)
	if err != nil {
		return handleError(err)
	}
	w.Header().Set("Content-Type", jsonContentType)
	e := json.NewEncoder(w)
	return handleError(e.Encode(torrent))
}

// putTorrent decodes a torrent from the JSON request body and stores it in
// the tracker, responding 400 on malformed JSON.
func (s *Server) putTorrent(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
	var torrent models.Torrent
	err := json.NewDecoder(r.Body).Decode(&torrent)
	if err != nil {
		return http.StatusBadRequest, err
	}
	s.tracker.PutTorrent(&torrent)
	return http.StatusOK, nil
}

// delTorrent removes the torrent identified by the URL-escaped infohash path
// parameter. Deleting an unknown torrent still responds 200.
func (s *Server) delTorrent(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
	infohash, err := url.QueryUnescape(p.ByName("infohash"))
	if err != nil {
		return http.StatusNotFound, err
	}
	s.tracker.DeleteTorrent(infohash)
	return http.StatusOK, nil
}

// getClient responds 200 when the given client ID is whitelisted and 404
// otherwise.
func (s *Server) getClient(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
	if err := s.tracker.ClientApproved(p.ByName("clientID")); err != nil {
		return http.StatusNotFound, err
	}
	return http.StatusOK, nil
}

// putClient adds the given client ID to the whitelist.
func (s *Server) putClient(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
	s.tracker.PutClient(p.ByName("clientID"))
	return http.StatusOK, nil
}

// delClient removes the given client ID from the whitelist.
func (s *Server) delClient(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
	s.tracker.DeleteClient(p.ByName("clientID"))
	return http.StatusOK, nil
}

View file

@ -1,255 +0,0 @@
// Package bittorrent implements all of the abstractions used to decouple the
// protocol of a BitTorrent tracker from the logic of handling Announces and
// Scrapes.
package bittorrent
import (
"fmt"
"net"
"time"
"github.com/chihaya/chihaya/pkg/log"
)
// PeerID represents a peer ID.
type PeerID [20]byte
// PeerIDFromBytes creates a PeerID from a byte slice.
//
// It panics if b is not 20 bytes long.
func PeerIDFromBytes(b []byte) PeerID {
	if len(b) != 20 {
		panic("peer ID must be 20 bytes")
	}
	var id PeerID
	copy(id[:], b)
	return id
}
// String implements fmt.Stringer, returning the base16 encoded PeerID.
func (p PeerID) String() string {
return fmt.Sprintf("%x", p[:])
}
// RawString returns a 20-byte string of the raw bytes of the ID.
func (p PeerID) RawString() string {
return string(p[:])
}
// PeerIDFromString creates a PeerID from a string.
//
// It panics if s is not 20 bytes long.
func PeerIDFromString(s string) PeerID {
	if len(s) != 20 {
		panic("peer ID must be 20 bytes")
	}
	var id PeerID
	copy(id[:], s)
	return id
}
// InfoHash represents an infohash.
type InfoHash [20]byte
// InfoHashFromBytes creates an InfoHash from a byte slice.
//
// It panics if b is not 20 bytes long.
func InfoHashFromBytes(b []byte) InfoHash {
	if len(b) != 20 {
		panic("infohash must be 20 bytes")
	}
	var ih InfoHash
	copy(ih[:], b)
	return ih
}
// InfoHashFromString creates an InfoHash from a string.
//
// It panics if s is not 20 bytes long.
func InfoHashFromString(s string) InfoHash {
	if len(s) != 20 {
		panic("infohash must be 20 bytes")
	}
	var ih InfoHash
	copy(ih[:], s)
	return ih
}
// String implements fmt.Stringer, returning the base16 encoded InfoHash.
func (i InfoHash) String() string {
return fmt.Sprintf("%x", i[:])
}
// RawString returns a 20-byte string of the raw bytes of the InfoHash.
func (i InfoHash) RawString() string {
return string(i[:])
}
// AnnounceRequest represents the parsed parameters from an announce request.
type AnnounceRequest struct {
Event Event
InfoHash InfoHash
Compact bool
EventProvided bool
NumWantProvided bool
IPProvided bool
NumWant uint32
Left uint64
Downloaded uint64
Uploaded uint64
Peer
Params
}
// LogFields renders the current response as a set of log fields.
func (r AnnounceRequest) LogFields() log.Fields {
return log.Fields{
"event": r.Event,
"infoHash": r.InfoHash,
"compact": r.Compact,
"eventProvided": r.EventProvided,
"numWantProvided": r.NumWantProvided,
"ipProvided": r.IPProvided,
"numWant": r.NumWant,
"left": r.Left,
"downloaded": r.Downloaded,
"uploaded": r.Uploaded,
"peer": r.Peer,
"params": r.Params,
}
}
// AnnounceResponse represents the parameters used to create an announce
// response.
type AnnounceResponse struct {
Compact bool
Complete uint32
Incomplete uint32
Interval time.Duration
MinInterval time.Duration
IPv4Peers []Peer
IPv6Peers []Peer
}
// LogFields renders the current response as a set of log fields.
//
// Every field of AnnounceResponse is included; the "incomplete" swarm count
// was previously missing from the rendered fields even though the struct
// carries it (AnnounceRequest.LogFields logs all of its fields).
func (r AnnounceResponse) LogFields() log.Fields {
	return log.Fields{
		"compact":     r.Compact,
		"complete":    r.Complete,
		"incomplete":  r.Incomplete,
		"interval":    r.Interval,
		"minInterval": r.MinInterval,
		"ipv4Peers":   r.IPv4Peers,
		"ipv6Peers":   r.IPv6Peers,
	}
}
// ScrapeRequest represents the parsed parameters from a scrape request.
type ScrapeRequest struct {
AddressFamily AddressFamily
InfoHashes []InfoHash
Params Params
}
// LogFields renders the current response as a set of log fields.
func (r ScrapeRequest) LogFields() log.Fields {
return log.Fields{
"addressFamily": r.AddressFamily,
"infoHashes": r.InfoHashes,
"params": r.Params,
}
}
// ScrapeResponse represents the parameters used to create a scrape response.
//
// The Scrapes must be in the same order as the InfoHashes in the corresponding
// ScrapeRequest.
type ScrapeResponse struct {
Files []Scrape
}
// LogFields renders the current response as a set of Logrus fields.
func (sr ScrapeResponse) LogFields() log.Fields {
return log.Fields{
"files": sr.Files,
}
}
// Scrape represents the state of a swarm that is returned in a scrape response.
type Scrape struct {
InfoHash InfoHash
Snatches uint32
Complete uint32
Incomplete uint32
}
// AddressFamily is the address family of an IP address.
type AddressFamily uint8
// String implements fmt.Stringer for an AddressFamily, panicking on any
// value other than IPv4 or IPv6.
func (af AddressFamily) String() string {
	if af == IPv4 {
		return "IPv4"
	}
	if af == IPv6 {
		return "IPv6"
	}
	panic("tried to print unknown AddressFamily")
}
// AddressFamily constants.
const (
IPv4 AddressFamily = iota
IPv6
)
// IP is a net.IP with an AddressFamily.
type IP struct {
net.IP
AddressFamily
}
func (ip IP) String() string {
return ip.IP.String()
}
// Peer represents the connection details of a peer that is returned in an
// announce response.
type Peer struct {
ID PeerID
IP IP
Port uint16
}
// String implements fmt.Stringer to return a human-readable representation.
// The string will have the format <PeerID>@[<IP>]:<port>, for example
// "0102030405060708090a0b0c0d0e0f1011121314@[10.11.12.13]:1234"
func (p Peer) String() string {
return fmt.Sprintf("%s@[%s]:%d", p.ID.String(), p.IP.String(), p.Port)
}
// LogFields renders the current peer as a set of Logrus fields.
func (p Peer) LogFields() log.Fields {
return log.Fields{
"ID": p.ID,
"IP": p.IP,
"port": p.Port,
}
}
// Equal reports whether p and x are the same.
func (p Peer) Equal(x Peer) bool { return p.EqualEndpoint(x) && p.ID == x.ID }
// EqualEndpoint reports whether p and x have the same endpoint.
func (p Peer) EqualEndpoint(x Peer) bool { return p.Port == x.Port && p.IP.Equal(x.IP.IP) }
// ClientError represents an error that should be exposed to the client over
// the BitTorrent protocol implementation.
type ClientError string
// Error implements the error interface for ClientError.
func (c ClientError) Error() string { return string(c) }

View file

@ -1,53 +0,0 @@
package bittorrent
import (
"fmt"
"net"
"testing"
"github.com/stretchr/testify/require"
)
var (
b = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
expected = "0102030405060708090a0b0c0d0e0f1011121314"
)
var peerStringTestCases = []struct {
input Peer
expected string
}{
{
input: Peer{
ID: PeerIDFromBytes(b),
IP: IP{net.IPv4(10, 11, 12, 1), IPv4},
Port: 1234,
},
expected: fmt.Sprintf("%s@[10.11.12.1]:1234", expected),
},
{
input: Peer{
ID: PeerIDFromBytes(b),
IP: IP{net.ParseIP("2001:db8::ff00:42:8329"), IPv6},
Port: 1234,
},
expected: fmt.Sprintf("%s@[2001:db8::ff00:42:8329]:1234", expected),
},
}
func TestPeerID_String(t *testing.T) {
s := PeerIDFromBytes(b).String()
require.Equal(t, expected, s)
}
func TestInfoHash_String(t *testing.T) {
s := InfoHashFromBytes(b).String()
require.Equal(t, expected, s)
}
func TestPeer_String(t *testing.T) {
for _, c := range peerStringTestCases {
got := c.input.String()
require.Equal(t, c.expected, got)
}
}

View file

@ -1,22 +0,0 @@
package bittorrent
// ClientID represents the part of a PeerID that identifies a Peer's client
// software.
type ClientID [6]byte
// NewClientID parses a ClientID from a PeerID.
//
// When the peer ID begins with '-' the six bytes following it are used
// (the "-XX1234-" convention seen in the test table); otherwise the first
// six bytes are used directly. Peer IDs too short for either form yield a
// zero ClientID.
func NewClientID(pid PeerID) ClientID {
	var cid ClientID
	length := len(pid)
	if length >= 6 {
		if pid[0] == '-' {
			// Need at least 7 bytes to take pid[1:7].
			if length >= 7 {
				copy(cid[:], pid[1:7])
			}
		} else {
			copy(cid[:], pid[:6])
		}
	}
	return cid
}

View file

@ -1,55 +0,0 @@
package bittorrent
import (
"testing"
)
func TestClientID(t *testing.T) {
clientTable := []struct{ peerID, clientID string }{
{"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
{"-AZ3042-6ozMq5q6Q3NX", "AZ3042"},
{"-BS5820-oy4La2MWGEFj", "BS5820"},
{"-AR6360-6oZyyMWoOOBe", "AR6360"},
{"-AG2083-s1hiF8vGAAg0", "AG2083"},
{"-AG3003-lEl2Mm4NEO4n", "AG3003"},
{"-MR1100-00HS~T7*65rm", "MR1100"},
{"-LK0140-ATIV~nbEQAMr", "LK0140"},
{"-KT2210-347143496631", "KT2210"},
{"-TR0960-6ep6svaa61r4", "TR0960"},
{"-XX1150-dv220cotgj4d", "XX1150"},
{"-AZ2504-192gwethivju", "AZ2504"},
{"-KT4310-3L4UvarKuqIu", "KT4310"},
{"-AZ2060-0xJQ02d4309O", "AZ2060"},
{"-BD0300-2nkdf08Jd890", "BD0300"},
{"-A~0010-a9mn9DFkj39J", "A~0010"},
{"-UT2300-MNu93JKnm930", "UT2300"},
{"-UT2300-KT4310KT4301", "UT2300"},
{"T03A0----f089kjsdf6e", "T03A0-"},
{"S58B-----nKl34GoNb75", "S58B--"},
{"M4-4-0--9aa757Efd5Bl", "M4-4-0"},
{"AZ2500BTeYUzyabAfo6U", "AZ2500"}, // BitTyrant
{"exbc0JdSklm834kj9Udf", "exbc0J"}, // Old BitComet
{"FUTB0L84j542mVc84jkd", "FUTB0L"}, // Alt BitComet
{"XBT054d-8602Jn83NnF9", "XBT054"}, // XBT
{"OP1011affbecbfabeefb", "OP1011"}, // Opera
{"-ML2.7.2-kgjjfkd9762", "ML2.7."}, // MLDonkey
{"-BOWA0C-SDLFJWEIORNM", "BOWA0C"}, // Bits on Wheels
{"Q1-0-0--dsn34DFn9083", "Q1-0-0"}, // Queen Bee
{"Q1-10-0-Yoiumn39BDfO", "Q1-10-"}, // Queen Bee Alt
{"346------SDFknl33408", "346---"}, // TorreTopia
{"QVOD0054ABFFEDCCDEDB", "QVOD00"}, // Qvod
}
for _, tt := range clientTable {
t.Run(tt.peerID, func(t *testing.T) {
var clientID ClientID
copy(clientID[:], []byte(tt.clientID))
parsedID := NewClientID(PeerIDFromString(tt.peerID))
if parsedID != clientID {
t.Error("Incorrectly parsed peer ID", tt.peerID, "as", parsedID)
}
})
}
}

View file

@ -1,64 +0,0 @@
package bittorrent
import (
"errors"
"strings"
)
// ErrUnknownEvent is returned when New fails to return an event.
var ErrUnknownEvent = errors.New("unknown event")
// Event represents an event done by a BitTorrent client.
type Event uint8
const (
// None is the event when a BitTorrent client announces due to time lapsed
// since the previous announce.
None Event = iota
// Started is the event sent by a BitTorrent client when it joins a swarm.
Started
// Stopped is the event sent by a BitTorrent client when it leaves a swarm.
Stopped
// Completed is the event sent by a BitTorrent client when it finishes
// downloading all of the required chunks.
Completed
)
var (
eventToString = make(map[Event]string)
stringToEvent = make(map[string]Event)
)
// init populates the bidirectional event<->string lookup tables. The empty
// string additionally maps to None so that announces omitting the event
// parameter parse as a regular (non-event) announce.
func init() {
	eventToString[None] = "none"
	eventToString[Started] = "started"
	eventToString[Stopped] = "stopped"
	eventToString[Completed] = "completed"
	stringToEvent[""] = None
	// Build the reverse mapping from the forward one.
	for k, v := range eventToString {
		stringToEvent[v] = k
	}
}
// NewEvent returns the proper Event given a string.
// Matching is case-insensitive; unknown strings yield ErrUnknownEvent.
func NewEvent(eventStr string) (Event, error) {
	e, ok := stringToEvent[strings.ToLower(eventStr)]
	if !ok {
		return None, ErrUnknownEvent
	}
	return e, nil
}
// String implements Stringer for an event, panicking on events with no
// registered name.
func (e Event) String() string {
	name, ok := eventToString[e]
	if !ok {
		panic("bittorrent: event has no associated name")
	}
	return name
}

View file

@ -1,39 +0,0 @@
package bittorrent
import (
"fmt"
"testing"
"github.com/stretchr/testify/require"
)
func TestNew(t *testing.T) {
table := []struct {
data string
expected Event
expectedErr error
}{
{"", None, nil},
{"NONE", None, nil},
{"none", None, nil},
{"started", Started, nil},
{"stopped", Stopped, nil},
{"completed", Completed, nil},
{"notAnEvent", None, ErrUnknownEvent},
}
for _, tt := range table {
t.Run(fmt.Sprintf("%#v expecting %s", tt.data, nilPrinter(tt.expectedErr)), func(t *testing.T) {
got, err := NewEvent(tt.data)
require.Equal(t, err, tt.expectedErr, "errors should equal the expected value")
require.Equal(t, got, tt.expected, "events should equal the expected value")
})
}
}
func nilPrinter(err error) string {
if err == nil {
return "nil"
}
return err.Error()
}

View file

@ -1,219 +0,0 @@
package bittorrent
import (
"errors"
"net/url"
"strconv"
"strings"
"github.com/chihaya/chihaya/pkg/log"
)
// Params is used to fetch (optional) request parameters from an Announce.
// For HTTP Announces this includes the request path and parsed query, for UDP
// Announces this is the extracted path and parsed query from optional URLData
// as specified in BEP41.
//
// See ParseURLData for specifics on parsing and limitations.
type Params interface {
// String returns a string parsed from a query. Every key can be
// returned as a string because they are encoded in the URL as strings.
String(key string) (string, bool)
// RawPath returns the raw path from the request URL.
// The path returned can contain URL encoded data.
// For a request of the form "/announce?port=1234" this would return
// "/announce".
RawPath() string
// RawQuery returns the raw query from the request URL, excluding the
// delimiter '?'.
// For a request of the form "/announce?port=1234" this would return
// "port=1234"
RawQuery() string
}
// ErrKeyNotFound is returned when a provided key has no value associated with
// it.
var ErrKeyNotFound = errors.New("query: value for the provided key does not exist")
// ErrInvalidInfohash is returned when parsing a query encounters an infohash
// with invalid length.
var ErrInvalidInfohash = ClientError("provided invalid infohash")
// ErrInvalidQueryEscape is returned when a query string contains invalid
// escapes.
var ErrInvalidQueryEscape = ClientError("invalid query escape")
// QueryParams parses a URL Query and implements the Params interface with some
// additional helpers.
type QueryParams struct {
path string
query string
params map[string]string
infoHashes []InfoHash
}
type routeParamsKey struct{}
// RouteParamsKey is a key for the context of a request that
// contains the named parameters from the http router.
var RouteParamsKey = routeParamsKey{}
// RouteParam is a type that contains the values from the named parameters
// on the route.
type RouteParam struct {
Key string
Value string
}
// RouteParams is a collection of RouteParam instances.
type RouteParams []RouteParam
// ByName returns the value of the first RouteParam that matches the given
// name. If no matching RouteParam is found, an empty string is returned.
// In the event that a "catch-all" parameter is provided on the route and
// no value is matched, an empty string is returned. For example: a route of
// "/announce/*param" matches on "/announce/". However, ByName("param") will
// return an empty string.
func (rp RouteParams) ByName(name string) string {
for _, p := range rp {
if p.Key == name {
return p.Value
}
}
return ""
}
// ParseURLData parses a request URL or UDP URLData as defined in BEP41.
// It expects a concatenated string of the request's path and query parts as
// defined in RFC 3986. As both the udp: and http: scheme used by BitTorrent
// include an authority part the path part must always begin with a slash.
// An example of the expected URLData would be "/announce?port=1234&uploaded=0"
// or "/?auth=0x1337".
// HTTP servers should pass (*http.Request).RequestURI, UDP servers should
// pass the concatenated, unchanged URLData as defined in BEP41.
//
// Note that, in the case of a key occurring multiple times in the query, only
// the last value for that key is kept.
// The only exception to this rule is the key "info_hash" which will attempt to
// parse each value as an InfoHash and return an error if parsing fails. All
// InfoHashes are collected and can later be retrieved by calling the InfoHashes
// method.
//
// Also note that any error that is encountered during parsing is returned as a
// ClientError, as this method is expected to be used to parse client-provided
// data.
func ParseURLData(urlData string) (*QueryParams, error) {
	var path, query string

	// IndexByte is the idiomatic way to locate a single delimiter byte;
	// IndexAny is intended for sets of characters.
	queryDelim := strings.IndexByte(urlData, '?')
	if queryDelim == -1 {
		path = urlData
	} else {
		path = urlData[:queryDelim]
		query = urlData[queryDelim+1:]
	}

	q, err := parseQuery(query)
	if err != nil {
		// Parsing failures here stem from client-provided data.
		return nil, ClientError(err.Error())
	}
	q.path = path
	return q, nil
}
// parseQuery parses a URL query into QueryParams.
// The query is expected to exclude the delimiting '?'.
//
// Both '&' and ';' delimit key/value pairs. Keys are lowercased; for any key
// other than "info_hash" only the last value wins, while every "info_hash"
// value is validated (exactly 20 bytes) and collected.
func parseQuery(query string) (q *QueryParams, err error) {
	// This is basically url.parseQuery, but with a map[string]string
	// instead of map[string][]string for the values.
	q = &QueryParams{
		query:      query,
		infoHashes: nil,
		params:     make(map[string]string),
	}
	for query != "" {
		// Slice off one key[=value] pair per iteration.
		key := query
		if i := strings.IndexAny(key, "&;"); i >= 0 {
			key, query = key[:i], key[i+1:]
		} else {
			query = ""
		}
		if key == "" {
			continue
		}
		value := ""
		if i := strings.Index(key, "="); i >= 0 {
			key, value = key[:i], key[i+1:]
		}
		key, err = url.QueryUnescape(key)
		if err != nil {
			// QueryUnescape returns an error like "invalid escape: '%x'".
			// But frontends record these errors to prometheus, which generates
			// a lot of time series.
			// We log it here for debugging instead.
			log.Debug("failed to unescape query param key", log.Err(err))
			return nil, ErrInvalidQueryEscape
		}
		value, err = url.QueryUnescape(value)
		if err != nil {
			// QueryUnescape returns an error like "invalid escape: '%x'".
			// But frontends record these errors to prometheus, which generates
			// a lot of time series.
			// We log it here for debugging instead.
			log.Debug("failed to unescape query param value", log.Err(err))
			return nil, ErrInvalidQueryEscape
		}
		if key == "info_hash" {
			// info_hash may legitimately repeat; collect every value.
			if len(value) != 20 {
				return nil, ErrInvalidInfohash
			}
			q.infoHashes = append(q.infoHashes, InfoHashFromString(value))
		} else {
			q.params[strings.ToLower(key)] = value
		}
	}
	return q, nil
}
// String returns a string parsed from a query. Every key can be returned as a
// string because they are encoded in the URL as strings.
func (qp *QueryParams) String(key string) (string, bool) {
value, ok := qp.params[key]
return value, ok
}
// Uint returns a uint parsed from a query. After being called, it is safe to
// cast the uint64 to your desired length.
//
// ErrKeyNotFound is returned when the key has no value; parse failures
// return strconv's error with a zero value.
func (qp *QueryParams) Uint(key string, bitSize int) (uint64, error) {
	raw, ok := qp.params[key]
	if !ok {
		return 0, ErrKeyNotFound
	}
	parsed, err := strconv.ParseUint(raw, 10, bitSize)
	if err != nil {
		// Return an explicit zero: ParseUint yields a clamped (non-zero)
		// value alongside range errors.
		return 0, err
	}
	return parsed, nil
}
// InfoHashes returns a list of requested infohashes.
func (qp *QueryParams) InfoHashes() []InfoHash {
return qp.infoHashes
}
// RawPath returns the raw path from the parsed URL.
func (qp *QueryParams) RawPath() string {
return qp.path
}
// RawQuery returns the raw query from the parsed URL.
func (qp *QueryParams) RawQuery() string {
return qp.query
}

View file

@ -1,129 +0,0 @@
package bittorrent
import (
"net/url"
"testing"
)
var (
testPeerID = "-TEST01-6wfG2wk6wWLc"
ValidAnnounceArguments = []url.Values{
{},
{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "numwant": {"28"}},
{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"stopped"}},
{"peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"started"}, "numwant": {"13"}},
{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "no_peer_id": {"1"}},
{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}},
{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}},
{"peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
{"peer_id": {"%3Ckey%3A+0x90%3E"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
{"peer_id": {"%3Ckey%3A+0x90%3E"}, "compact": {"1"}},
{"peer_id": {""}, "compact": {""}},
}
InvalidQueries = []string{
"/announce?" + "info_hash=%0%a",
}
// See https://github.com/chihaya/chihaya/issues/334.
shouldNotPanicQueries = []string{
"/annnounce?" + "info_hash=" + testPeerID + "&a",
"/annnounce?" + "info_hash=" + testPeerID + "&=b?",
}
)
// mapArrayEqual reports whether boxed (a url.Values-style map whose values
// must each hold exactly one element) contains the same key/value pairs as
// unboxed.
func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool {
	if len(boxed) != len(unboxed) {
		return false
	}
	for key, values := range boxed {
		// A box with zero or multiple elements can never match.
		if len(values) != 1 {
			return false
		}
		if values[0] != unboxed[key] {
			return false
		}
	}
	return true
}
func TestParseEmptyURLData(t *testing.T) {
parsedQuery, err := ParseURLData("")
if err != nil {
t.Fatal(err)
}
if parsedQuery == nil {
t.Fatal("Parsed query must not be nil")
}
}
func TestParseValidURLData(t *testing.T) {
for parseIndex, parseVal := range ValidAnnounceArguments {
parsedQueryObj, err := ParseURLData("/announce?" + parseVal.Encode())
if err != nil {
t.Fatal(err)
}
if !mapArrayEqual(parseVal, parsedQueryObj.params) {
t.Fatalf("Incorrect parse at item %d.\n Expected=%v\n Received=%v\n", parseIndex, parseVal, parsedQueryObj.params)
}
if parsedQueryObj.path != "/announce" {
t.Fatalf("Incorrect path, expected %q, got %q", "/announce", parsedQueryObj.path)
}
}
}
func TestParseInvalidURLData(t *testing.T) {
for parseIndex, parseStr := range InvalidQueries {
parsedQueryObj, err := ParseURLData(parseStr)
if err == nil {
t.Fatal("Should have produced error", parseIndex)
}
if parsedQueryObj != nil {
t.Fatal("Should be nil after error", parsedQueryObj, parseIndex)
}
}
}
func TestParseShouldNotPanicURLData(t *testing.T) {
for _, parseStr := range shouldNotPanicQueries {
_, _ = ParseURLData(parseStr)
}
}
func BenchmarkParseQuery(b *testing.B) {
announceStrings := make([]string, 0)
for i := range ValidAnnounceArguments {
announceStrings = append(announceStrings, ValidAnnounceArguments[i].Encode())
}
b.ResetTimer()
for bCount := 0; bCount < b.N; bCount++ {
i := bCount % len(announceStrings)
parsedQueryObj, err := parseQuery(announceStrings[i])
if err != nil {
b.Error(err, i)
b.Log(parsedQueryObj)
}
}
}
func BenchmarkURLParseQuery(b *testing.B) {
announceStrings := make([]string, 0)
for i := range ValidAnnounceArguments {
announceStrings = append(announceStrings, ValidAnnounceArguments[i].Encode())
}
b.ResetTimer()
for bCount := 0; bCount < b.N; bCount++ {
i := bCount % len(announceStrings)
parsedQueryObj, err := url.ParseQuery(announceStrings[i])
if err != nil {
b.Error(err, i)
b.Log(parsedQueryObj)
}
}
}

View file

@ -1,49 +0,0 @@
package bittorrent
import (
"net"
"github.com/chihaya/chihaya/pkg/log"
)
// ErrInvalidIP indicates an invalid IP for an Announce.
var ErrInvalidIP = ClientError("invalid IP")
// SanitizeAnnounce enforces a max and default NumWant and coerces the peer's
// IP address into the proper format.
//
// Returns ErrInvalidIP when the peer's IP is neither an IPv4 address nor a
// 16-byte IPv6 address. The request is mutated in place.
func SanitizeAnnounce(r *AnnounceRequest, maxNumWant, defaultNumWant uint32) error {
	// Clamp NumWant: absent values get the default, oversized values the
	// configured maximum.
	if !r.NumWantProvided {
		r.NumWant = defaultNumWant
	} else if r.NumWant > maxNumWant {
		r.NumWant = maxNumWant
	}
	// To4 succeeds for any IPv4 address (including 16-byte IPv4-mapped
	// forms); store the 4-byte form and tag the family accordingly.
	if ip := r.Peer.IP.To4(); ip != nil {
		r.Peer.IP.IP = ip
		r.Peer.IP.AddressFamily = IPv4
	} else if len(r.Peer.IP.IP) == net.IPv6len { // implies r.Peer.IP.To4() == nil
		r.Peer.IP.AddressFamily = IPv6
	} else {
		return ErrInvalidIP
	}
	log.Debug("sanitized announce", r, log.Fields{
		"maxNumWant":     maxNumWant,
		"defaultNumWant": defaultNumWant,
	})
	return nil
}
// SanitizeScrape enforces a max number of infohashes for a single scrape
// request, silently truncating any overflow. The request is mutated in
// place and the function never fails.
func SanitizeScrape(r *ScrapeRequest, maxScrapeInfoHashes uint32) error {
	if len(r.InfoHashes) > int(maxScrapeInfoHashes) {
		r.InfoHashes = r.InfoHashes[:maxScrapeInfoHashes]
	}
	log.Debug("sanitized scrape", r, log.Fields{
		"maxScrapeInfoHashes": maxScrapeInfoHashes,
	})
	return nil
}

121
chihaya.go Normal file
View file

@ -0,0 +1,121 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package chihaya implements the ability to boot the Chihaya BitTorrent
// tracker with your own imports that can dynamically register additional
// functionality.
package chihaya
import (
"flag"
"os"
"os/signal"
"runtime"
"sync"
"syscall"
"github.com/golang/glog"
"github.com/chihaya/chihaya/api"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/http"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker"
"github.com/chihaya/chihaya/udp"
)
var (
maxProcs int
configPath string
)
func init() {
flag.IntVar(&maxProcs, "maxprocs", runtime.NumCPU(), "maximum parallel threads")
flag.StringVar(&configPath, "config", "", "path to the configuration file")
}
type server interface {
Serve()
Stop()
}
// Boot starts Chihaya. By exporting this function, anyone can import their own
// custom drivers into their own package main and then call chihaya.Boot.
//
// Boot blocks until either a SIGINT/SIGTERM arrives or every configured
// server exits on its own, then stops all servers and closes the tracker.
func Boot() {
	defer glog.Flush()

	flag.Parse()

	runtime.GOMAXPROCS(maxProcs)
	glog.V(1).Info("Set max threads to ", maxProcs)

	debugBoot()
	defer debugShutdown()

	cfg, err := config.Open(configPath)
	if err != nil {
		glog.Fatalf("Failed to parse configuration file: %s\n", err)
	}

	if cfg == &config.DefaultConfig {
		glog.V(1).Info("Using default config")
	} else {
		glog.V(1).Infof("Loaded config file: %s", configPath)
	}

	stats.DefaultStats = stats.New(cfg.StatsConfig)

	tkr, err := tracker.New(cfg)
	if err != nil {
		glog.Fatal("New: ", err)
	}

	// Only servers with a configured listen address are started.
	var servers []server
	if cfg.APIConfig.ListenAddr != "" {
		servers = append(servers, api.NewServer(cfg, tkr))
	}
	if cfg.HTTPConfig.ListenAddr != "" {
		servers = append(servers, http.NewServer(cfg, tkr))
	}
	if cfg.UDPConfig.ListenAddr != "" {
		servers = append(servers, udp.NewServer(cfg, tkr))
	}

	var wg sync.WaitGroup
	for _, srv := range servers {
		wg.Add(1)

		// If you don't explicitly pass the server, every goroutine captures the
		// last server in the list.
		go func(srv server) {
			defer wg.Done()
			srv.Serve()
		}(srv)
	}

	// signal.Notify performs a non-blocking send, so the channel must be
	// buffered or a signal delivered before we block on the receive below
	// would be silently dropped.
	shutdown := make(chan os.Signal, 1)
	signal.Notify(shutdown, syscall.SIGINT, syscall.SIGTERM)

	// If every server exits on its own, close shutdown so both receives
	// below unblock without an incoming signal.
	go func() {
		wg.Wait()
		signal.Stop(shutdown)
		close(shutdown)
	}()

	// Block until a signal arrives or all servers have exited.
	<-shutdown
	glog.Info("Shutting down...")

	for _, srv := range servers {
		srv.Stop()
	}

	// Second receive: wait for the goroutine above to observe that all
	// servers finished before tearing down the tracker.
	<-shutdown

	if err := tkr.Close(); err != nil {
		glog.Errorf("Failed to shut down tracker cleanly: %s", err.Error())
	}
}

View file

@ -1,92 +0,0 @@
package main
import (
"errors"
"io/ioutil"
"os"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/frontend/http"
"github.com/chihaya/chihaya/frontend/udp"
"github.com/chihaya/chihaya/middleware"
// Imports to register middleware drivers.
_ "github.com/chihaya/chihaya/middleware/clientapproval"
_ "github.com/chihaya/chihaya/middleware/fixedpeer"
_ "github.com/chihaya/chihaya/middleware/jwt"
_ "github.com/chihaya/chihaya/middleware/torrentapproval"
_ "github.com/chihaya/chihaya/middleware/varinterval"
// Imports to register storage drivers.
_ "github.com/chihaya/chihaya/storage/memory"
_ "github.com/chihaya/chihaya/storage/redis"
)
type storageConfig struct {
Name string `yaml:"name"`
Config interface{} `yaml:"config"`
}
// Config represents the configuration used for executing Chihaya.
type Config struct {
middleware.ResponseConfig `yaml:",inline"`
MetricsAddr string `yaml:"metrics_addr"`
HTTPConfig http.Config `yaml:"http"`
UDPConfig udp.Config `yaml:"udp"`
Storage storageConfig `yaml:"storage"`
PreHooks []middleware.HookConfig `yaml:"prehooks"`
PostHooks []middleware.HookConfig `yaml:"posthooks"`
}
// PreHookNames returns only the names of the configured middleware.
func (cfg Config) PreHookNames() (names []string) {
	for i := range cfg.PreHooks {
		names = append(names, cfg.PreHooks[i].Name)
	}
	return names
}
// PostHookNames returns only the names of the configured middleware.
func (cfg Config) PostHookNames() (names []string) {
	for i := range cfg.PostHooks {
		names = append(names, cfg.PostHooks[i].Name)
	}
	return names
}
// ConfigFile represents a namespaced YAML configation file.
type ConfigFile struct {
Chihaya Config `yaml:"chihaya"`
}
// ParseConfigFile returns a new ConfigFile given the path to a YAML
// configuration file.
//
// It supports relative and absolute paths and environment variables.
func ParseConfigFile(path string) (*ConfigFile, error) {
	if path == "" {
		return nil, errors.New("no config path specified")
	}

	// ReadFile subsumes the previous Open/ReadAll/deferred-Close sequence
	// and cannot leak the file handle.
	contents, err := ioutil.ReadFile(os.ExpandEnv(path))
	if err != nil {
		return nil, err
	}

	var cfgFile ConfigFile
	if err := yaml.Unmarshal(contents, &cfgFile); err != nil {
		return nil, err
	}

	return &cfgFile, nil
}

View file

@ -1,134 +0,0 @@
package main
import (
"crypto/rand"
"fmt"
"time"
"github.com/anacrolix/torrent/tracker"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/pkg/log"
)
// EndToEndRunCmdFunc implements a Cobra command that runs the end-to-end test
// suite for a Chihaya build.
func EndToEndRunCmdFunc(cmd *cobra.Command, args []string) error {
delay, err := cmd.Flags().GetDuration("delay")
if err != nil {
return err
}
// Test the HTTP tracker
httpAddr, err := cmd.Flags().GetString("httpaddr")
if err != nil {
return err
}
if len(httpAddr) != 0 {
log.Info("testing HTTP...")
err := test(httpAddr, delay)
if err != nil {
return err
}
log.Info("success")
}
// Test the UDP tracker.
udpAddr, err := cmd.Flags().GetString("udpaddr")
if err != nil {
return err
}
if len(udpAddr) != 0 {
log.Info("testing UDP...")
err := test(udpAddr, delay)
if err != nil {
return err
}
log.Info("success")
}
return nil
}
// generateInfohash returns a fresh, cryptographically random 20-byte
// infohash for use in an announce.
func generateInfohash() [20]byte {
	buf := make([]byte, 20)

	n, err := rand.Read(buf)
	if err != nil {
		panic(err)
	}
	if n != 20 {
		panic(fmt.Errorf("not enough randomness? Got %d bytes", n))
	}

	return [20]byte(bittorrent.InfoHashFromBytes(buf))
}
func test(addr string, delay time.Duration) error {
ih := generateInfohash()
return testWithInfohash(ih, addr, delay)
}
// testWithInfohash announces two distinct peers for infoHash against the
// tracker at url, and verifies that the second announce is answered with
// exactly the first peer. delay is slept between the two announces.
func testWithInfohash(infoHash [20]byte, url string, delay time.Duration) error {
	// First announce: peer 1 (IP 50.10.12.1, port 10001) joins the swarm.
	req := tracker.AnnounceRequest{
		InfoHash: infoHash,
		PeerId: [20]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
		Downloaded: 50,
		Left: 100,
		Uploaded: 50,
		Event: tracker.Started,
		// 50<<24 | 10<<16 | 12<<8 | 1 encodes the IPv4 address 50.10.12.1.
		IPAddress: uint32(50<<24 | 10<<16 | 12<<8 | 1),
		NumWant: 50,
		Port: 10001,
	}
	resp, err := tracker.Announce{
		TrackerUrl: url,
		Request: req,
		UserAgent: "chihaya-e2e",
	}.Do()
	if err != nil {
		return errors.Wrap(err, "announce failed")
	}
	// NOTE(review): expecting one peer on the very first announce implies the
	// tracker includes the announcing peer itself in its response — confirm
	// against the tracker's response semantics.
	if len(resp.Peers) != 1 {
		return fmt.Errorf("expected one peer, got %d", len(resp.Peers))
	}

	time.Sleep(delay)

	// Second announce: peer 2 (IP 50.10.12.2, port 10002) joins; it should
	// be told about peer 1 only.
	req = tracker.AnnounceRequest{
		InfoHash: infoHash,
		PeerId: [20]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21},
		Downloaded: 50,
		Left: 100,
		Uploaded: 50,
		Event: tracker.Started,
		IPAddress: uint32(50<<24 | 10<<16 | 12<<8 | 2),
		NumWant: 50,
		Port: 10002,
	}
	resp, err = tracker.Announce{
		TrackerUrl: url,
		Request: req,
		UserAgent: "chihaya-e2e",
	}.Do()
	if err != nil {
		return errors.Wrap(err, "announce failed")
	}
	if len(resp.Peers) != 1 {
		return fmt.Errorf("expected 1 peers, got %d", len(resp.Peers))
	}
	// The returned peer must be peer 1, identified by its port.
	if resp.Peers[0].Port != 10001 {
		return fmt.Errorf("expected port 10001, got %d ", resp.Peers[0].Port)
	}
	return nil
}

View file

@ -1,246 +1,11 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package main
import (
"context"
"errors"
"os/signal"
"runtime"
"strings"
"syscall"
"time"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/chihaya/chihaya/frontend/http"
"github.com/chihaya/chihaya/frontend/udp"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/metrics"
"github.com/chihaya/chihaya/pkg/stop"
"github.com/chihaya/chihaya/storage"
)
// Run represents the state of a running instance of Chihaya.
type Run struct {
	// configFilePath is retained so Start can re-read the configuration
	// when the process is reloaded.
	configFilePath string
	peerStore storage.PeerStore
	logic *middleware.Logic
	// sg groups the frontends and the metrics server so they can be
	// stopped together.
	sg *stop.Group
}
// NewRun runs an instance of Chihaya.
func NewRun(configFilePath string) (*Run, error) {
	run := &Run{configFilePath: configFilePath}
	return run, run.Start(nil)
}
// Start begins an instance of Chihaya.
// It is optional to provide an instance of the peer store to avoid the
// creation of a new one.
func (r *Run) Start(ps storage.PeerStore) error {
	configFile, err := ParseConfigFile(r.configFilePath)
	if err != nil {
		return errors.New("failed to read config: " + err.Error())
	}
	cfg := configFile.Chihaya
	r.sg = stop.NewGroup()
	log.Info("starting metrics server", log.Fields{"addr": cfg.MetricsAddr})
	r.sg.Add(metrics.NewServer(cfg.MetricsAddr))
	// Only build a new peer store when the caller did not hand one over
	// (on reload, Stop(true) keeps the store and it is passed back in here).
	if ps == nil {
		log.Info("starting storage", log.Fields{"name": cfg.Storage.Name})
		ps, err = storage.NewPeerStore(cfg.Storage.Name, cfg.Storage.Config)
		if err != nil {
			return errors.New("failed to create storage: " + err.Error())
		}
		log.Info("started storage", ps)
	}
	r.peerStore = ps
	preHooks, err := middleware.HooksFromHookConfigs(cfg.PreHooks)
	if err != nil {
		return errors.New("failed to validate hook config: " + err.Error())
	}
	postHooks, err := middleware.HooksFromHookConfigs(cfg.PostHooks)
	if err != nil {
		return errors.New("failed to validate hook config: " + err.Error())
	}
	log.Info("starting tracker logic", log.Fields{
		"prehooks": cfg.PreHookNames(),
		"posthooks": cfg.PostHookNames(),
	})
	r.logic = middleware.NewLogic(cfg.ResponseConfig, r.peerStore, preHooks, postHooks)
	// Each frontend is optional: an empty address disables it.
	if cfg.HTTPConfig.Addr != "" {
		log.Info("starting HTTP frontend", cfg.HTTPConfig)
		httpfe, err := http.NewFrontend(r.logic, cfg.HTTPConfig)
		if err != nil {
			return err
		}
		r.sg.Add(httpfe)
	}
	if cfg.UDPConfig.Addr != "" {
		log.Info("starting UDP frontend", cfg.UDPConfig)
		udpfe, err := udp.NewFrontend(r.logic, cfg.UDPConfig)
		if err != nil {
			return err
		}
		r.sg.Add(udpfe)
	}
	return nil
}
func combineErrors(prefix string, errs []error) error {
errStrs := make([]string, 0, len(errs))
for _, err := range errs {
errStrs = append(errStrs, err.Error())
}
return errors.New(prefix + ": " + strings.Join(errStrs, "; "))
}
// Stop shuts down an instance of Chihaya.
// Shutdown proceeds outside-in: frontends and the metrics server first, then
// the middleware logic, then (unless keepPeerStore is true) the peer store.
// When keepPeerStore is true the running store is returned so it can be
// handed back to Start on a reload; otherwise nil is returned.
func (r *Run) Stop(keepPeerStore bool) (storage.PeerStore, error) {
	log.Debug("stopping frontends and metrics server")
	if errs := r.sg.Stop().Wait(); len(errs) != 0 {
		return nil, combineErrors("failed while shutting down frontends", errs)
	}
	log.Debug("stopping logic")
	if errs := r.logic.Stop().Wait(); len(errs) != 0 {
		return nil, combineErrors("failed while shutting down middleware", errs)
	}
	if !keepPeerStore {
		log.Debug("stopping peer store")
		if errs := r.peerStore.Stop().Wait(); len(errs) != 0 {
			return nil, combineErrors("failed while shutting down peer store", errs)
		}
		r.peerStore = nil
	}
	return r.peerStore, nil
}
// RootRunCmdFunc implements a Cobra command that runs an instance of Chihaya
// and handles reloading and shutdown via process signals.
//
// Returns any error encountered while starting, reloading, or stopping the
// instance; returns nil on a clean signal-triggered shutdown.
func RootRunCmdFunc(cmd *cobra.Command, args []string) error {
	configFilePath, err := cmd.Flags().GetString("config")
	if err != nil {
		return err
	}

	r, err := NewRun(configFilePath)
	if err != nil {
		return err
	}

	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop()

	// BUG FIX: a context returned by signal.NotifyContext stays Done forever
	// once it has fired. The previous code created the reload context once,
	// so after the first reload signal the select fired on every iteration
	// and the process reloaded in a tight loop. A fresh reload context must
	// therefore be armed after every reload.
	reload, stopReload := signal.NotifyContext(context.Background(), ReloadSignals...)
	defer func() { stopReload() }()

	for {
		select {
		case <-reload.Done():
			// Arm the replacement registration before releasing the old one
			// so there is no window in which a reload signal falls back to
			// its default disposition (which may terminate the process).
			nextReload, nextStop := signal.NotifyContext(context.Background(), ReloadSignals...)
			stopReload()
			reload, stopReload = nextReload, nextStop

			log.Info("reloading; received reload signal")
			peerStore, err := r.Stop(true)
			if err != nil {
				return err
			}
			if err := r.Start(peerStore); err != nil {
				return err
			}
		case <-ctx.Done():
			log.Info("shutting down; received shutdown signal")
			if _, err := r.Stop(false); err != nil {
				return err
			}
			return nil
		}
	}
}
// RootPreRunCmdFunc handles command line flags for the Run command.
func RootPreRunCmdFunc(cmd *cobra.Command, args []string) error {
	flags := cmd.Flags()

	noColors, err := flags.GetBool("nocolors")
	if err != nil {
		return err
	}
	if noColors {
		log.SetFormatter(&logrus.TextFormatter{DisableColors: true})
	}

	jsonLog, err := flags.GetBool("json")
	if err != nil {
		return err
	}
	if jsonLog {
		log.SetFormatter(&logrus.JSONFormatter{})
		log.Info("enabled JSON logging")
	}

	debugLog, err := flags.GetBool("debug")
	if err != nil {
		return err
	}
	if debugLog {
		log.SetDebug(true)
		log.Info("enabled debug logging")
	}

	return nil
}
// RootPostRunCmdFunc handles clean up of any state initialized by command line
// flags.
//
// Currently a no-op kept so the cobra PersistentPostRunE hook has a stable
// place for future cleanup.
func RootPostRunCmdFunc(cmd *cobra.Command, args []string) error {
	return nil
}
import "github.com/chihaya/chihaya"
// main wires up the root and e2e cobra commands and executes them.
func main() {
	// NOTE(review): an `import "github.com/chihaya/chihaya"` statement appears
	// immediately above this function, after other declarations. Go requires
	// all imports before any other declaration, so this file looks like a bad
	// merge of two versions — confirm and relocate or remove the import.
	rootCmd := &cobra.Command{
		Use: "chihaya",
		Short: "BitTorrent Tracker",
		Long: "A customizable, multi-protocol BitTorrent Tracker",
		PersistentPreRunE: RootPreRunCmdFunc,
		RunE: RootRunCmdFunc,
		PersistentPostRunE: RootPostRunCmdFunc,
	}
	rootCmd.PersistentFlags().Bool("debug", false, "enable debug logging")
	rootCmd.PersistentFlags().Bool("json", false, "enable json logging")
	// Windows consoles generally lack ANSI color support, so default
	// nocolors to true there.
	if runtime.GOOS == "windows" {
		rootCmd.PersistentFlags().Bool("nocolors", true, "disable log coloring")
	} else {
		rootCmd.PersistentFlags().Bool("nocolors", false, "disable log coloring")
	}
	rootCmd.Flags().String("config", "/etc/chihaya.yaml", "location of configuration file")
	e2eCmd := &cobra.Command{
		Use: "e2e",
		Short: "exec e2e tests",
		Long: "Execute the Chihaya end-to-end test suite",
		RunE: EndToEndRunCmdFunc,
	}
	e2eCmd.Flags().String("httpaddr", "http://127.0.0.1:6969/announce", "address of the HTTP tracker")
	e2eCmd.Flags().String("udpaddr", "udp://127.0.0.1:6969", "address of the UDP tracker")
	e2eCmd.Flags().Duration("delay", time.Second, "delay between announces")
	rootCmd.AddCommand(e2eCmd)
	if err := rootCmd.Execute(); err != nil {
		log.Fatal("failed when executing root cobra command: " + err.Error())
	}
	// NOTE(review): chihaya.Boot() only runs after the root command has fully
	// returned (i.e. after tracker shutdown). Presumably a leftover from an
	// older entry point — verify whether this call belongs here at all.
	chihaya.Boot()
}

View file

@ -1,15 +0,0 @@
//go:build darwin || freebsd || linux || netbsd || openbsd || dragonfly || solaris
// +build darwin freebsd linux netbsd openbsd dragonfly solaris
package main
import (
"os"
"syscall"
)
// ReloadSignals are the signals that the current OS will send to the process
// when a configuration reload is requested.
//
// SIGUSR1 is used on Unix-like systems; the Windows build of this package
// defines the same variable with SIGHUP instead.
var ReloadSignals = []os.Signal{
	syscall.SIGUSR1,
}

View file

@ -1,14 +0,0 @@
//go:build windows
// +build windows
package main
import (
"os"
"os/signal"
"syscall"
)
// ReloadSignals are the signals that the current OS will send to the process
// when a configuration reload is requested.
var ReloadSignals = []os.Signal{
	syscall.SIGHUP,
}

193
config/config.go Normal file
View file

@ -0,0 +1,193 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package config implements the configuration for a BitTorrent tracker
package config
import (
"encoding/json"
"io"
"os"
"time"
)
// Duration wraps a time.Duration and adds JSON marshalling.
type Duration struct{ time.Duration }
// MarshalJSON transforms a duration into JSON.
func (d *Duration) MarshalJSON() ([]byte, error) {
return json.Marshal(d.String())
}
// UnmarshalJSON transform JSON into a Duration.
func (d *Duration) UnmarshalJSON(b []byte) error {
var str string
err := json.Unmarshal(b, &str)
d.Duration, err = time.ParseDuration(str)
return err
}
// SubnetConfig is the configuration used to specify if local peers should be
// given a preference when responding to an announce.
type SubnetConfig struct {
	PreferredSubnet bool `json:"preferredSubnet,omitempty"`
	// Prefix lengths (in bits) used to decide whether peers share a subnet.
	PreferredIPv4Subnet int `json:"preferredIPv4Subnet,omitempty"`
	PreferredIPv6Subnet int `json:"preferredIPv6Subnet,omitempty"`
}

// NetConfig is the configuration used to tune networking behaviour.
type NetConfig struct {
	AllowIPSpoofing bool `json:"allowIPSpoofing"`
	DualStackedPeers bool `json:"dualStackedPeers"`
	RealIPHeader string `json:"realIPHeader"`
	RespectAF bool `json:"respectAF"`
	// SubnetConfig is embedded so its JSON keys appear at this level.
	SubnetConfig
}

// StatsConfig is the configuration used to record runtime statistics.
type StatsConfig struct {
	BufferSize int `json:"statsBufferSize"`
	IncludeMem bool `json:"includeMemStats"`
	VerboseMem bool `json:"verboseMemStats"`
	MemUpdateInterval Duration `json:"memStatsInterval"`
}

// WhitelistConfig is the configuration used to enable and store a whitelist
// of acceptable torrent client peer ID prefixes.
type WhitelistConfig struct {
	ClientWhitelistEnabled bool `json:"clientWhitelistEnabled"`
	ClientWhitelist []string `json:"clientWhitelist,omitempty"`
}

// TrackerConfig is the configuration for tracker functionality.
type TrackerConfig struct {
	CreateOnAnnounce bool `json:"createOnAnnounce"`
	PurgeInactiveTorrents bool `json:"purgeInactiveTorrents"`
	Announce Duration `json:"announce"`
	MinAnnounce Duration `json:"minAnnounce"`
	ReapInterval Duration `json:"reapInterval"`
	ReapRatio float64 `json:"reapRatio"`
	NumWantFallback int `json:"defaultNumWant"`
	TorrentMapShards int `json:"torrentMapShards"`
	JWKSetURI string `json:"jwkSetURI"`
	JWKSetUpdateInterval Duration `json:"jwkSetUpdateInterval"`
	JWTAudience string `json:"jwtAudience"`
	NetConfig
	WhitelistConfig
}

// APIConfig is the configuration for an HTTP JSON API server.
type APIConfig struct {
	ListenAddr string `json:"apiListenAddr"`
	RequestTimeout Duration `json:"apiRequestTimeout"`
	ReadTimeout Duration `json:"apiReadTimeout"`
	WriteTimeout Duration `json:"apiWriteTimeout"`
	ListenLimit int `json:"apiListenLimit"`
}

// HTTPConfig is the configuration for the HTTP protocol.
type HTTPConfig struct {
	ListenAddr string `json:"httpListenAddr"`
	RequestTimeout Duration `json:"httpRequestTimeout"`
	ReadTimeout Duration `json:"httpReadTimeout"`
	WriteTimeout Duration `json:"httpWriteTimeout"`
	ListenLimit int `json:"httpListenLimit"`
}

// UDPConfig is the configuration for the UDP protocol.
type UDPConfig struct {
	ListenAddr string `json:"udpListenAddr"`
	ReadBufferSize int `json:"udpReadBufferSize"`
}
// Config is the global configuration for an instance of Chihaya.
// All sub-configs are embedded, so their JSON keys are flattened into a
// single top-level object.
type Config struct {
	TrackerConfig
	APIConfig
	HTTPConfig
	UDPConfig
	StatsConfig
}
// DefaultConfig is a configuration that can be used as a fallback value.
//
// NOTE(review): this is a mutable package-level variable and Open("") hands
// out a pointer to it, so callers mutating their Config may mutate the
// defaults for everyone — confirm whether that sharing is intended.
var DefaultConfig = Config{
	TrackerConfig: TrackerConfig{
		CreateOnAnnounce: true,
		PurgeInactiveTorrents: true,
		Announce: Duration{30 * time.Minute},
		MinAnnounce: Duration{15 * time.Minute},
		ReapInterval: Duration{60 * time.Second},
		ReapRatio: 1.25,
		NumWantFallback: 50,
		TorrentMapShards: 1,
		JWKSetURI: "",
		JWKSetUpdateInterval: Duration{5 * time.Minute},
		JWTAudience: "",
		NetConfig: NetConfig{
			AllowIPSpoofing: true,
			DualStackedPeers: true,
			RespectAF: false,
		},
		WhitelistConfig: WhitelistConfig{
			ClientWhitelistEnabled: false,
		},
	},
	APIConfig: APIConfig{
		ListenAddr: "localhost:6880",
		RequestTimeout: Duration{10 * time.Second},
		ReadTimeout: Duration{10 * time.Second},
		WriteTimeout: Duration{10 * time.Second},
	},
	HTTPConfig: HTTPConfig{
		ListenAddr: "localhost:6881",
		RequestTimeout: Duration{10 * time.Second},
		ReadTimeout: Duration{10 * time.Second},
		WriteTimeout: Duration{10 * time.Second},
	},
	UDPConfig: UDPConfig{
		ListenAddr: "localhost:6882",
	},
	StatsConfig: StatsConfig{
		BufferSize: 0,
		IncludeMem: true,
		VerboseMem: false,
		MemUpdateInterval: Duration{5 * time.Second},
	},
}
// Open is a shortcut to open a file, read it, and generate a Config.
// It supports relative and absolute paths. Given "", it returns DefaultConfig.
//
// NOTE(review): for the "" case the returned pointer aliases the package-level
// DefaultConfig — mutations through it affect all users of the defaults.
func Open(path string) (*Config, error) {
	if path == "" {
		return &DefaultConfig, nil
	}
	// Environment variables in the path (e.g. $HOME) are expanded first.
	f, err := os.Open(os.ExpandEnv(path))
	if err != nil {
		return nil, err
	}
	defer f.Close()
	conf, err := Decode(f)
	if err != nil {
		return nil, err
	}
	return conf, nil
}
// Decode casts an io.Reader into a JSONDecoder and decodes it into a *Config.
// Decoding starts from a copy of DefaultConfig, so fields absent from the
// JSON keep their default values.
func Decode(r io.Reader) (*Config, error) {
	conf := DefaultConfig
	err := json.NewDecoder(r).Decode(&conf)
	return &conf, err
}

56
debug.go Normal file
View file

@ -0,0 +1,56 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package chihaya
import (
"flag"
"net/http"
"os"
"runtime/pprof"
_ "net/http/pprof"
"github.com/golang/glog"
)
var (
	// profile is the path CPU profiling data is written to; empty disables it.
	profile string
	// debugAddr is the address the debug HTTP server listens on; empty disables it.
	debugAddr string
	// profileFile is the open handle the CPU profile is streamed into,
	// closed by debugShutdown.
	profileFile *os.File
)

// init registers the -profile and -debug command line flags.
func init() {
	flag.StringVar(&profile, "profile", "", "if non-empty, path to write CPU profiling data")
	flag.StringVar(&debugAddr, "debug", "", "if non-empty, address to serve debug data")
}
// debugBoot starts the optional debug facilities configured via flags: an
// HTTP server exposing net/http/pprof handlers when -debug is set, and CPU
// profiling to a file when -profile is set. It logs fatally on failure.
func debugBoot() {
	if debugAddr != "" {
		go func() {
			glog.Info("Starting debug HTTP on ", debugAddr)
			glog.Fatal(http.ListenAndServe(debugAddr, nil))
		}()
	}

	if profile != "" {
		var err error
		profileFile, err = os.Create(profile)
		if err != nil {
			glog.Fatalf("Failed to create profile file: %s\n", err)
		}
		// BUG FIX: the error from StartCPUProfile was previously ignored;
		// it fails e.g. when profiling is already enabled, leaving the
		// caller believing a profile is being collected.
		if err := pprof.StartCPUProfile(profileFile); err != nil {
			glog.Fatalf("Failed to start profiling: %s\n", err)
		}
		glog.Info("Started profiling")
	}
}
// debugShutdown stops CPU profiling (if it was started by debugBoot) and
// closes the profile file.
func debugShutdown() {
	if profileFile != nil {
		// BUG FIX: StopCPUProfile flushes buffered profile data to the
		// writer, so it must run BEFORE the file is closed; the previous
		// order closed the file first, losing the tail of the profile.
		pprof.StopCPUProfile()
		profileFile.Close()
		glog.Info("Stopped profiling")
	}
}

View file

@ -1,197 +0,0 @@
---
chihaya:
# The interval communicated with BitTorrent clients informing them how
# frequently they should announce in between client events.
announce_interval: "30m"
# The interval communicated with BitTorrent clients informing them of the
# minimal duration between announces.
min_announce_interval: "15m"
# The network interface that will bind to an HTTP endpoint that can be
# scraped by programs collecting metrics.
#
# /metrics serves metrics in the Prometheus format
# /debug/pprof/{cmdline,profile,symbol,trace} serves profiles in the pprof format
metrics_addr: "0.0.0.0:6880"
# This block defines configuration for the tracker's HTTP interface.
# If you do not wish to run this, delete this section.
http:
# The network interface that will bind to an HTTP server for serving
# BitTorrent traffic. Remove this to disable the non-TLS listener.
addr: "0.0.0.0:6969"
# The network interface that will bind to an HTTPS server for serving
# BitTorrent traffic. If set, tls_cert_path and tls_key_path are required.
https_addr: ""
# The path to the required files to listen via HTTPS.
tls_cert_path: ""
tls_key_path: ""
# The timeout durations for HTTP requests.
read_timeout: "5s"
write_timeout: "5s"
# When true, persistent connections will be allowed. Generally this is not
# useful for a public tracker, but helps performance in some cases (use of
# a reverse proxy, or when there are few clients issuing many requests).
enable_keepalive: false
idle_timeout: "30s"
# Whether to time requests.
# Disabling this should increase performance/decrease load.
enable_request_timing: false
# An array of routes to listen on for announce requests. This is an option
# to support trackers that do not listen for /announce or need to listen
# on multiple routes.
#
# This supports named parameters and catch-all parameters as described at
# https://github.com/julienschmidt/httprouter#named-parameters
announce_routes:
- "/announce"
# - "/announce.php"
# An array of routes to listen on for scrape requests. This is an option
# to support trackers that do not listen for /scrape or need to listen
# on multiple routes.
#
# This supports named parameters and catch-all parameters as described at
# https://github.com/julienschmidt/httprouter#named-parameters
scrape_routes:
- "/scrape"
# - "/scrape.php"
# When enabled, the IP address used to connect to the tracker will not
# override the value clients advertise as their IP address.
allow_ip_spoofing: false
# The HTTP Header containing the IP address of the client.
# This is only necessary if using a reverse proxy.
real_ip_header: "x-real-ip"
# The maximum number of peers returned for an individual request.
max_numwant: 100
# The default number of peers returned for an individual request.
default_numwant: 50
# The maximum number of infohashes that can be scraped in one request.
max_scrape_infohashes: 50
# This block defines configuration for the tracker's UDP interface.
# If you do not wish to run this, delete this section.
udp:
# The network interface that will bind to a UDP server for serving
# BitTorrent traffic.
addr: "0.0.0.0:6969"
# The leeway for a timestamp on a connection ID.
max_clock_skew: "10s"
# The key used to encrypt connection IDs.
private_key: "paste a random string here that will be used to hmac connection IDs"
# Whether to time requests.
# Disabling this should increase performance/decrease load.
enable_request_timing: false
# When enabled, the IP address used to connect to the tracker will not
# override the value clients advertise as their IP address.
allow_ip_spoofing: false
# The maximum number of peers returned for an individual request.
max_numwant: 100
# The default number of peers returned for an individual request.
default_numwant: 50
# The maximum number of infohashes that can be scraped in one request.
max_scrape_infohashes: 50
# This block defines configuration used for the storage of peer data.
storage:
name: "memory"
config:
# The frequency which stale peers are removed.
# This balances between
# - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
# - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
gc_interval: "3m"
# The amount of time until a peer is considered stale.
# To avoid churn, keep this slightly larger than `announce_interval`
peer_lifetime: "31m"
# The number of partitions data will be divided into in order to provide a
# higher degree of parallelism.
shard_count: 1024
# The interval at which metrics about the number of infohashes and peers
# are collected and posted to Prometheus.
prometheus_reporting_interval: "1s"
# This block defines configuration used for redis storage.
# storage:
# name: redis
# config:
# # The frequency which stale peers are removed.
# # This balances between
# # - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
# # - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
# gc_interval: "3m"
# # The interval at which metrics about the number of infohashes and peers
# # are collected and posted to Prometheus.
# prometheus_reporting_interval: "1s"
# # The amount of time until a peer is considered stale.
# # To avoid churn, keep this slightly larger than `announce_interval`
# peer_lifetime: "31m"
# # The address of redis storage.
# redis_broker: "redis://pwd@127.0.0.1:6379/0"
# # The timeout for reading a command reply from redis.
# redis_read_timeout: "15s"
# # The timeout for writing a command to redis.
# redis_write_timeout: "15s"
# # The timeout for connecting to redis server.
# redis_connect_timeout: "15s"
# This block defines configuration used for middleware executed before a
# response has been returned to a BitTorrent client.
prehooks:
# - name: "jwt"
# options:
# issuer: "https://issuer.com"
# audience: "https://chihaya.issuer.com"
# jwk_set_url: "https://issuer.com/keys"
# jwk_set_update_interval: "5m"
# - name: "client approval"
# options:
# whitelist:
# - "OP1011"
# blacklist:
# - "OP1012"
# - name: "interval variation"
# options:
# modify_response_probability: 0.2
# max_increase_delta: 60
# modify_min_interval: true
# This block defines configuration used for torrent approval. It must be given
# hashes for a whitelist or a blacklist. Hashes are hexadecimal-encoded.
# - name: "torrent approval"
# options:
# whitelist:
# - "a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5"
# blacklist:
# - "e1d2c3b4a5e1b2c3b4a5e1d2c3b4e5e1d2c3b4a5"

View file

@ -1,21 +0,0 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View file

@ -1,10 +0,0 @@
apiVersion: v1
name: chihaya
home: https://chihaya.io
version: 0.1.0
description: A Helm chart for running the Chihaya BitTorrent tracker on Kubernetes.
sources:
- https://github.com/chihaya/chihaya
maintainers:
- name: Jimmy Zelinskie
email: jimmyzelinskie@gmail.com

View file

@ -1,6 +0,0 @@
You can port forward a local port to Prometheus or the HTTP tracker by running:
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ template "fullname" . }}" -o jsonpath="{.items[0].metadata.name}")
# Metrics port
kubectl port-forward $POD_NAME 8080:{{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
# HTTP tracker port
kubectl port-forward $POD_NAME 8080:{{ $v := .Values.config.chihaya.http.addr | split ":" }}{{ $v._1 }}

View file

@ -1,16 +0,0 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 24 -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 24 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
*/}}
{{- define "fullname" -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- printf "%s-%s" .Release.Name $name | trunc 24 -}}
{{- end -}}

View file

@ -1,9 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
data:
config.yaml: |
{{ toYaml .Values.config | indent 4 }}

View file

@ -1,43 +0,0 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
spec:
replicas: {{ .Values.replicaCount }}
template:
metadata:
labels:
app: {{ template "fullname" . }}
spec:
volumes:
- name: config
configMap:
name: {{ template "fullname" . }}
containers:
- name: {{ .Chart.Name }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
args:
- "--config=/etc/chihaya/config.yaml"
- "--debug"
- "--json"
ports:
- name: bittorrent-http
containerPort: {{ $v := .Values.config.chihaya.http.addr | split ":" }}{{ $v._1 }}
protocol: TCP
- name: bittorrent-udp
containerPort: {{ $v := .Values.config.chihaya.udp.addr | split ":" }}{{ $v._1 }}
protocol: UDP
- name: metrics
containerPort: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
livenessProbe:
httpGet:
path: /
port: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
volumeMounts:
- name: config
mountPath: /etc/chihaya
resources:
{{ toYaml .Values.resources | indent 10 }}

View file

@ -1,27 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: {{ template "fullname" . }}
labels:
chart: "{{ .Chart.Name }}-{{ .Chart.Version }}"
annotations:
prometheus.io/scrape: "true"
prometheus.io/path: "/metrics"
prometheus.io/port: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 | quote }}
spec:
type: {{ .Values.service.type }}
ports:
- name: bittorrent-http
port: {{ $v := .Values.config.chihaya.http.addr | split ":" }}{{ $v._1 }}
targetPort: {{ $v := .Values.config.chihaya.http.addr | split ":" }}{{ $v._1 }}
protocol: TCP
- name: bittorrent-udp
port: {{ $v := .Values.config.chihaya.udp.addr | split ":" }}{{ $v._1 }}
targetPort: {{ $v := .Values.config.chihaya.udp.addr | split ":" }}{{ $v._1 }}
protocol: UDP
- name: metrics
port: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
targetPort: {{ $v := .Values.config.chihaya.metrics_addr | split ":" }}{{ $v._1 }}
protocol: TCP
selector:
app: {{ template "fullname" . }}

View file

@ -1,162 +0,0 @@
replicaCount: 1
image:
repository: quay.io/jzelinskie/chihaya-git
tag: latest
pullPolicy: IfNotPresent
service:
name: chihaya
type: ClusterIP
resources:
limits:
cpu: 100m
memory: 1Gi
config:
chihaya:
# The interval communicated with BitTorrent clients informing them how
# frequently they should announce in between client events.
announce_interval: 30m
# The interval communicated with BitTorrent clients informing them of the
# minimal duration between announces.
min_announce_interval: 15m
# The network interface that will bind to an HTTP endpoint that can be
# scraped by programs collecting metrics.
#
# /metrics serves metrics in the Prometheus format
# /debug/pprof/{cmdline,profile,symbol,trace} serves profiles in the pprof format
metrics_addr: "0.0.0.0:6880"
# The maximum number of peers returned in an announce.
max_numwant: 50
# The default number of peers returned in an announce.
default_numwant: 25
# The number of infohashes a single scrape can request before being truncated.
max_scrape_infohashes: 50
# This block defines configuration for the tracker's HTTP interface.
# If you do not wish to run this, delete this section.
http:
# The network interface that will bind to an HTTP server for serving
# BitTorrent traffic.
addr: "0.0.0.0:6969"
# The path to the required files to listen via HTTPS.
tls_cert_path: ""
tls_key_path: ""
# The timeout durations for HTTP requests.
read_timeout: 5s
write_timeout: 5s
# Whether to time requests.
# Disabling this should increase performance/decrease load.
enable_request_timing: false
# When true, persistent connections will be allowed. Generally this is not
# useful for a public tracker, but helps performance in some cases (use of
# a reverse proxy, or when there are few clients issuing many requests).
enable_keepalive: false
idle_timeout: 30s
# Whether to listen on /announce.php and /scrape.php in addition to their
# non-.php counterparts.
# This is an option for compatibility with (very) old clients or otherwise
# outdated systems.
# This might be useful to retracker.local users, for more information see
# http://rutracker.wiki/Оптимизация_обмена_битторрент_траффиком_в_локальных_сетях
# and
# http://rutracker.wiki/Retracker.local
enable_legacy_php_urls: false
# When enabled, the IP address used to connect to the tracker will not
# override the value clients advertise as their IP address.
allow_ip_spoofing: false
# The HTTP Header containing the IP address of the client.
# This is only necessary if using a reverse proxy.
real_ip_header: "x-real-ip"
# The maximum number of peers returned for an individual request.
max_numwant: 100
# The default number of peers returned for an individual request.
default_numwant: 50
# The maximum number of infohashes that can be scraped in one request.
max_scrape_infohashes: 50
# This block defines configuration for the tracker's UDP interface.
# If you do not wish to run this, delete this section.
udp:
# The network interface that will bind to a UDP server for serving
# BitTorrent traffic.
addr: "0.0.0.0:6969"
# The leeway for a timestamp on a connection ID.
max_clock_skew: 10s
# The key used to encrypt connection IDs.
private_key: "paste a random string here that will be used to hmac connection IDs"
# Whether to time requests.
# Disabling this should increase performance/decrease load.
enable_request_timing: false
# When enabled, the IP address used to connect to the tracker will not
# override the value clients advertise as their IP address.
allow_ip_spoofing: false
# The maximum number of peers returned for an individual request.
max_numwant: 100
# The default number of peers returned for an individual request.
default_numwant: 50
# The maximum number of infohashes that can be scraped in one request.
max_scrape_infohashes: 50
# This block defines configuration used for the storage of peer data.
storage:
name: memory
config:
# The frequency which stale peers are removed.
gc_interval: 3m
# The amount of time until a peer is considered stale.
# To avoid churn, keep this slightly larger than `announce_interval`
peer_lifetime: 31m
# The number of partitions data will be divided into in order to provide a
# higher degree of parallelism.
shard_count: 1024
# The interval at which metrics about the number of infohashes and peers
# are collected and posted to Prometheus.
prometheus_reporting_interval: 1s
# This block defines configuration used for middleware executed before a
# response has been returned to a BitTorrent client.
prehooks:
#- name: jwt
# options:
# issuer: "https://issuer.com"
# audience: "https://chihaya.issuer.com"
# jwk_set_url: "https://issuer.com/keys"
# jwk_set_update_interval: 5m
#- name: client approval
# options:
# whitelist:
# - "OP1011"
# blacklist:
# - "OP1012"
#- name: interval variation
# options:
# modify_response_probability: 0.2
# max_increase_delta: 60
# modify_min_interval: true

View file

@ -1,12 +0,0 @@
---
global:
scrape_interval: "5s"
evaluation_interval: "5s"
# A scrape configuration containing exactly one endpoint to scrape:
scrape_configs:
# The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
- job_name: "local-chihaya" # you can name this however you want
scrape_interval: "5s" # optionally override the global scrape_interval
static_configs:
- targets: ["localhost:6881"] # provide the address of chihaya's prometheus endpoint

View file

@ -1,36 +0,0 @@
digraph G {
subgraph cluster_0 {
label = "chihaya";
style = "line";
color = "blue";
"Storage";
subgraph cluster_1 {
label = "frontend";
style = "line";
color = "hotpink";
"Parser";
"Writer";
}
subgraph cluster_2 {
label = "logic";
style = "line";
color = "purple";
"PreHook Middleware";
"PostHook Middleware";
"Response Generator";
}
}
"BitTorrent Client" -> "Parser";
"Parser" -> "PreHook Middleware";
"PreHook Middleware" -> "Response Generator";
"PostHook Middleware" -> "Storage";
"Storage" -> "Response Generator";
"Response Generator" -> "Writer";
"Writer" -> "BitTorrent Client";
}

View file

@ -1,16 +0,0 @@
# Architecture
## Overview
BitTorrent clients send Announce and Scrape requests to a _Frontend_.
Frontends parse requests and write responses for the particular protocol they implement.
The _TrackerLogic_ interface is used to generate responses for requests and optionally perform a task after responding to a client.
A configurable chain of _PreHook_ and _PostHook_ middleware is used to construct an instance of TrackerLogic.
PreHooks are middleware that are executed before the response has been written.
After all PreHooks have executed, any missing response fields that are required are filled by reading out of the configured implementation of the _Storage_ interface.
PostHooks are asynchronous tasks that occur after a response has been delivered to the client.
Because they are not necessary for generating a response, updates to the Storage for a particular request are done asynchronously in a PostHook.
## Diagram
![architecture diagram](https://user-images.githubusercontent.com/343539/52676700-05c45c80-2ef9-11e9-9887-8366008b4e7e.png)

View file

@ -1,111 +0,0 @@
# Frontends
A _Frontend_ is a component of Chihaya that serves a BitTorrent tracker on one protocol.
The frontend accepts, parses and sanitizes requests, passes them to the _Logic_ and writes responses to _Clients_.
This documentation first gives a high-level overview of Frontends and later goes into implementation specifics.
Users of Chihaya are expected to just read the first part - developers should read both.
## Functionality
A Frontend serves one protocol, for example HTTP ([BEP 3]) or UDP ([BEP 15]).
It listens for requests and usually answers each of them with one response. A basic overview of the control flow is:
1. Read the request.
2. Parse the request.
3. Have the Logic handle the request. This calls a series of `PreHooks`.
4. Send a response to the Client.
5. Process the request and response through `PostHooks`.
## Available Frontends
Chihaya ships with frontends for HTTP(S) and UDP.
The HTTP frontend uses Go's `http` package.
The UDP frontend implements both [old-opentracker-style] IPv6 and the IPv6 support specified in [BEP 15].
The advantage of the old opentracker style is that it contains a usable IPv6 `ip` field, to enable IP overrides in announces.
## Implementing a Frontend
This part is intended for developers.
### Implementation Specifics
A frontend should serve only one protocol.
It may serve that protocol on multiple transports or networks, if applicable.
An example of that is the `http` Frontend, operating both on HTTP and HTTPS.
The typical control flow of handling announces, in more detail, is:
1. Read the request.
2. Parse the request, if invalid go to 9.
3. Validate/sanitize the request, if invalid go to 9.
4. If the request is protocol-specific, handle, respond, and go to 8.
5. Pass the request to the `TrackerLogic`'s `HandleAnnounce` or `HandleScrape` method, if an error is returned go to 9.
6. Send the response to the Client.
7. Pass the request and response to the `TrackerLogic`'s `AfterAnnounce` or `AfterScrape` method.
8. Finish, accept next request.
9. For invalid requests or errors during processing: Send an error response to the client.
This step may be skipped for suspected denial-of-service attacks.
The error response may contain information about the cause of the error.
Only errors where the Client is at fault should be explained, internal server errors should be returned without explanation.
Then finish, and accept the next request.
#### Configuration
The frontend must be configurable using a single, exported struct.
The struct must have YAML annotations.
The struct must implement `log.Fielder` to be logged on startup.
#### Metrics
Frontends may provide runtime metrics, such as the number of requests or their duration.
Metrics must be reported using [Prometheus].
A frontend should provide at least the following metrics:
- The number of valid and invalid requests handled
- The average time it takes to handle a single request.
This request timing should be made optional using a config entry.
Requests should be separated by type, i.e. Scrapes, Announces, and other protocol-specific requests.
If the frontend serves multiple transports or networks, metrics for them should be separable.
It is recommended to publish one Prometheus `HistogramVec` with:
- A name like `chihaya_PROTOCOL_response_duration_milliseconds`
- A value holding the duration in milliseconds of the reported request
- Labels for:
- `action` (= `announce`, `scrape`, ...)
- `address_family` (= `Unknown`, `IPv4`, `IPv6`, ...), if applicable
- `error` (= A textual representation of the error encountered during processing.)
Because `error` is expected to hold the textual representation of any error that occurred during the request, great care must be taken to ensure all error messages are static.
`error` must not contain any information directly taken from the request, e.g. the value of an invalid parameter.
This would cause this dimension of prometheus to explode, which slows down prometheus clients and reporters.
#### Error Handling
Frontends should return `bittorrent.ClientError`s to the Client.
Frontends must not return errors that are not a `bittorrent.ClientError` to the Client.
A message like `internal server error` should be used instead.
#### Request Sanitization
The `TrackerLogic` expects sanitized requests in order to function properly.
The `bittorrent` package provides the `SanitizeAnnounce` and `SanitizeScrape` functions to sanitize Announces and Scrapes, respectively.
This is the minimal required sanitization, every `AnnounceRequest` and `ScrapeRequest` must be sanitized this way.
Note that the `AnnounceRequest` struct contains booleans of the form `XProvided`, where `X` denotes an optional parameter of the BitTorrent protocol.
These should be set according to the values received by the Client.
#### Contexts
All methods of the `TrackerLogic` interface expect a `context.Context` as a parameter.
After a request is handled by `HandleAnnounce` without errors, the populated context returned must be used to call `AfterAnnounce`.
The same applies to Scrapes.
This way, a PreHook can communicate with a PostHook by setting a context value.
[BEP 3]: http://bittorrent.org/beps/bep_0003.html
[BEP 15]: http://bittorrent.org/beps/bep_0015.html
[Prometheus]: https://prometheus.io/
[old-opentracker-style]: https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/

View file

@ -1,35 +0,0 @@
# Announce Interval Variation Middleware
This package provides the announce middleware `interval variation` which randomizes the announce interval.
## Functionality
This middleware chooses random announces and modifies the `interval` and `min_interval` fields.
A random number of seconds are added to the `interval` field and, if desired, also to the `min_interval` field.
Note that if a response is picked for modification and `min_interval` should be changed as well, both `interval` and `min_interval` are modified by the same amount.
## Use Case
Use this middleware to avoid recurring load spikes on the tracker.
By randomizing the announce interval, load spikes will flatten out after a few announce cycles.
## Configuration
This middleware provides the following parameters for configuration:
- `modify_response_probability` (float, >0, <= 1) indicates the probability by which a response will be chosen to have its announce intervals modified.
- `max_increase_delta` (int, >0) sets an upper boundary (inclusive) for the amount of seconds added.
- `modify_min_interval` (boolean) whether to modify the `min_interval` field as well.
An example config might look like this:
```yaml
chihaya:
prehooks:
- name: interval variation
config:
modify_response_probability: 0.2
max_increase_delta: 60
modify_min_interval: true
```

View file

@ -1,86 +0,0 @@
# Redis Storage
This storage implementation separates Chihaya from its storage service.
Chihaya achieves HA by storing all peer data in Redis.
Multiple instances of Chihaya can use the same redis instance concurrently.
The storage service can get HA by clustering.
If one instance of Chihaya goes down, peer data will still be available in Redis.
The HA of storage service is not considered here.
In case Redis runs as a single node, peer data will be unavailable if the node is down.
You should consider setting up a Redis cluster for Chihaya in production.
This storage implementation is currently orders of magnitude slower than the in-memory implementation.
## Use Case
When one instance of Chihaya is down, other instances can continue serving peers from Redis.
## Configuration
```yaml
chihaya:
storage:
name: redis
config:
# The frequency which stale peers are removed.
# This balances between
# - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
# - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
gc_interval: 3m
# The interval at which metrics about the number of infohashes and peers
# are collected and posted to Prometheus.
prometheus_reporting_interval: 1s
# The amount of time until a peer is considered stale.
# To avoid churn, keep this slightly larger than `announce_interval`
peer_lifetime: 31m
# The address of redis storage.
redis_broker: "redis://pwd@127.0.0.1:6379/0"
# The timeout for reading a command reply from redis.
redis_read_timeout: 15s
# The timeout for writing a command to redis.
redis_write_timeout: 15s
# The timeout for connecting to redis server.
redis_connect_timeout: 15s
```
## Implementation
Seeders and Leechers for a particular InfoHash are stored within a redis hash.
The InfoHash is used as key, _peer keys_ are the fields, last modified times are values.
Peer keys are derived from peers and contain Peer ID, IP, and Port.
All the InfoHashes (swarms) are also stored in a redis hash, with IP family as the key, infohash as field, and last modified time as value.
Here is an example:
```yaml
- IPv4
- IPv4_S_<infohash 1>: <modification time>
- IPv4_L_<infohash 1>: <modification time>
- IPv4_S_<infohash 2>: <modification time>
- IPv4_S_<infohash 1>
- <peer 1 key>: <modification time>
- <peer 2 key>: <modification time>
- IPv4_L_<infohash 1>
- <peer 3 key>: <modification time>
- IPv4_S_<infohash 2>
- <peer 3 key>: <modification time>
```
In this case, prometheus would record two swarms, three seeders, and one leecher.
These three keys per address family are used to record the count of swarms, seeders, and leechers.
```yaml
- IPv4_infohash_count: 2
- IPv4_S_count: 3
- IPv4_L_count: 1
```
Note: `IPv4_infohash_count` has a different meaning compared to the `memory` storage:
It represents the number of infohashes reported by seeders, meaning that infohashes without seeders are not counted.

35
example_config.json Normal file
View file

@ -0,0 +1,35 @@
{
"createOnAnnounce": true,
"purgeInactiveTorrents": true,
"announce": "30m",
"minAnnounce": "15m",
"reapInterval": "60s",
"reapRatio": 1.25,
"defaultNumWant": 50,
"torrentMapShards": 1,
"jwkSetURI": "",
"jwkSetUpdateInterval": "5m",
"jwtAudience": "",
"allowIPSpoofing": true,
"dualStackedPeers": true,
"realIPHeader": "",
"respectAF": false,
"clientWhitelistEnabled": false,
"clientWhitelist": ["OP1011"],
"apiListenAddr": "localhost:6880",
"apiRequestTimeout": "4s",
"apiReadTimeout": "4s",
"apiWriteTimeout": "4s",
"apiListenLimit": 0,
"udpListenAddr": "localhost:6881",
"httpListenAddr": "localhost:6881",
"httpRequestTimeout": "4s",
"httpReadTimeout": "4s",
"httpWriteTimeout": "4s",
"httpListenLimit": 0,
"driver": "noop",
"statsBufferSize": 0,
"includeMemStats": true,
"verboseMemStats": false,
"memStatsInterval": "5s"
}

View file

@ -1,31 +0,0 @@
package frontend
import (
"context"
"github.com/chihaya/chihaya/bittorrent"
)
// TrackerLogic is the interface used by a frontend in order to: (1) generate a
// response from a parsed request, and (2) asynchronously observe anything
// after the response has been delivered to the client.
type TrackerLogic interface {
	// HandleAnnounce generates a response for an Announce.
	//
	// Returns the updated context, the generated AnnounceResponse and no error
	// on success; nil and error on failure.
	HandleAnnounce(context.Context, *bittorrent.AnnounceRequest) (context.Context, *bittorrent.AnnounceResponse, error)

	// AfterAnnounce does something with the results of an Announce after it
	// has been completed.
	AfterAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse)

	// HandleScrape generates a response for a Scrape.
	//
	// Returns the updated context, the generated ScrapeResponse and no error
	// on success; nil and error on failure.
	HandleScrape(context.Context, *bittorrent.ScrapeRequest) (context.Context, *bittorrent.ScrapeResponse, error)

	// AfterScrape does something with the results of a Scrape after it has been completed.
	AfterScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse)
}

View file

@ -1,41 +0,0 @@
// Package bencode implements bencoding of data as defined in BEP 3 using
// type assertion over reflection for performance.
package bencode
import "bytes"
// Dict represents a bencode dictionary.
type Dict map[string]interface{}

// Compile-time check that Dict implements the Marshaler interface.
var _ Marshaler = Dict{}

// NewDict allocates the memory for a Dict.
func NewDict() Dict {
	return Dict{}
}

// MarshalBencode implements the Marshaler interface for Dict.
func (d Dict) MarshalBencode() ([]byte, error) {
	var out bytes.Buffer
	err := marshalMap(&out, map[string]interface{}(d))
	return out.Bytes(), err
}

// List represents a bencode list.
type List []interface{}

// Compile-time check that List implements the Marshaler interface.
var _ Marshaler = List{}

// NewList allocates the memory for a List.
func NewList() List {
	return List{}
}

// MarshalBencode implements the Marshaler interface for List.
func (l List) MarshalBencode() ([]byte, error) {
	var out bytes.Buffer
	err := marshalList(&out, []interface{}(l))
	return out.Bytes(), err
}

View file

@ -1,141 +0,0 @@
package bencode
import (
"bufio"
"bytes"
"errors"
"io"
"strconv"
)
// A Decoder reads bencoded objects from an input stream.
type Decoder struct {
	r *bufio.Reader
}

// NewDecoder returns a new decoder that reads from r.
func NewDecoder(r io.Reader) *Decoder {
	return &Decoder{r: bufio.NewReader(r)}
}

// Decode unmarshals the next bencoded value in the stream.
func (dec *Decoder) Decode() (interface{}, error) {
	return unmarshal(dec.r)
}

// Unmarshal deserializes and returns the bencoded value in buf.
func Unmarshal(buf []byte) (interface{}, error) {
	return unmarshal(bufio.NewReader(bytes.NewReader(buf)))
}
// unmarshal reads a single bencoded value from r.
//
// The first byte selects the type: 'i' begins an integer, 'l' a list, 'd' a
// dictionary; any other byte must start a "<length>:<bytes>" string.
func unmarshal(r *bufio.Reader) (interface{}, error) {
	tok, err := r.ReadByte()
	if err != nil {
		return nil, err
	}

	switch tok {
	case 'i':
		return readTerminatedInt(r, 'e')
	case 'l':
		return readList(r)
	case 'd':
		return readDict(r)
	default:
		// Not a marker byte: put it back and parse a length-prefixed string.
		if err := r.UnreadByte(); err != nil {
			return nil, err
		}
		length, err := readTerminatedInt(r, ':')
		if err != nil {
			return nil, errors.New("bencode: unknown input sequence")
		}
		if length < 0 {
			// Guard before make: a negative count would panic.
			return nil, errors.New("bencode: negative string length")
		}
		buf := make([]byte, length)
		// io.ReadFull retries short reads. A single r.Read may legitimately
		// return fewer than len(buf) bytes (e.g. when the string straddles
		// the bufio buffer boundary), which the old code misreported as
		// "bencode: short read".
		if _, err := io.ReadFull(r, buf); err != nil {
			return nil, err
		}
		return string(buf), nil
	}
}
func readTerminator(r io.ByteScanner, term byte) (bool, error) {
tok, err := r.ReadByte()
if err != nil {
return false, err
} else if tok == term {
return true, nil
}
return false, r.UnreadByte()
}
func readTerminatedInt(r *bufio.Reader, term byte) (int64, error) {
buf, err := r.ReadSlice(term)
if err != nil {
return 0, err
} else if len(buf) <= 1 {
return 0, errors.New("bencode: empty integer field")
}
return strconv.ParseInt(string(buf[:len(buf)-1]), 10, 64)
}
// readList reads bencoded values until the 'e' terminator and returns them as
// a List. The leading 'l' is assumed to have been consumed by the caller.
func readList(r *bufio.Reader) (List, error) {
	result := NewList()
	for {
		done, err := readTerminator(r, 'e')
		if err != nil {
			return nil, err
		}
		if done {
			return result, nil
		}
		item, err := unmarshal(r)
		if err != nil {
			return nil, err
		}
		result = append(result, item)
	}
}
// readDict reads key/value pairs until the 'e' terminator and returns them as
// a Dict. Keys must decode to strings. The leading 'd' is assumed to have
// been consumed by the caller.
func readDict(r *bufio.Reader) (Dict, error) {
	result := NewDict()
	for {
		done, err := readTerminator(r, 'e')
		if err != nil {
			return nil, err
		}
		if done {
			return result, nil
		}
		rawKey, err := unmarshal(r)
		if err != nil {
			return nil, err
		}
		key, isString := rawKey.(string)
		if !isString {
			return nil, errors.New("bencode: non-string map key")
		}
		value, err := unmarshal(r)
		if err != nil {
			return nil, err
		}
		result[key] = value
	}
}

View file

@ -1,84 +0,0 @@
package bencode
import (
"testing"
"github.com/stretchr/testify/require"
)
// unmarshalTests enumerates bencoded inputs alongside the Go values they must
// decode to.
var unmarshalTests = []struct {
	input    string
	expected interface{}
}{
	{"i42e", int64(42)},
	{"i-42e", int64(-42)},
	{"7:example", "example"},
	{"l3:one3:twoe", List{"one", "two"}},
	{"le", List{}},
	{"d3:one2:aa3:two2:bbe", Dict{"one": "aa", "two": "bb"}},
	{"de", Dict{}},
}

// TestUnmarshal decodes each fixture and compares against its expected value.
func TestUnmarshal(t *testing.T) {
	for _, tt := range unmarshalTests {
		t.Run(tt.input, func(t *testing.T) {
			got, err := Unmarshal([]byte(tt.input))
			require.NoError(t, err, "unmarshal should not fail")
			// require.Equal takes (t, expected, actual); the original call had
			// the arguments swapped, producing misleading failure output.
			require.Equal(t, tt.expected, got, "unmarshalled values should match the expected results")
		})
	}
}
// bufferLoop is an io.Reader that replays the same value on every call and
// never returns an error (so it never signals io.EOF) — useful for driving
// the decoder benchmarks indefinitely.
type bufferLoop struct {
	val string
}

// Read copies as much of val as fits into b and reports no error.
func (r *bufferLoop) Read(b []byte) (int, error) {
	n := copy(b, r.val)
	return n, nil
}
// BenchmarkUnmarshalScalar measures decoding of small scalar values (a string
// and an integer) fed from looping readers that never hit EOF.
func BenchmarkUnmarshalScalar(b *testing.B) {
	d1 := NewDecoder(&bufferLoop{"7:example"})
	d2 := NewDecoder(&bufferLoop{"i42e"})

	for i := 0; i < b.N; i++ {
		_, _ = d1.Decode()
		_, _ = d2.Decode()
	}
}
// TestUnmarshalLarge round-trips a nested structure through Marshal and a
// Decoder and verifies the decoded value matches the original.
func TestUnmarshalLarge(t *testing.T) {
	data := Dict{
		"k1": List{"a", "b", "c"},
		"k2": int64(42),
		"k3": "val",
		"k4": int64(-42),
	}

	// The original discarded this error; check it so an encoder regression
	// fails here instead of surfacing as a confusing decode error.
	buf, err := Marshal(data)
	require.NoError(t, err, "marshal should not fail")

	dec := NewDecoder(&bufferLoop{string(buf)})
	got, err := dec.Decode()
	require.NoError(t, err, "decode should not fail")
	// require.Equal takes (t, expected, actual); the original call had the
	// arguments swapped, producing misleading failure output.
	require.Equal(t, data, got, "encoding and decoding should equal the original value")
}
// BenchmarkUnmarshalLarge measures decoding of the bencoding of a nested
// structure, replayed endlessly by a bufferLoop reader.
func BenchmarkUnmarshalLarge(b *testing.B) {
	data := map[string]interface{}{
		"k1": []string{"a", "b", "c"},
		"k2": 42,
		"k3": "val",
		"k4": uint(42),
	}
	buf, _ := Marshal(data)
	dec := NewDecoder(&bufferLoop{string(buf)})

	for i := 0; i < b.N; i++ {
		_, _ = dec.Decode()
	}
}

View file

@ -1,196 +0,0 @@
package bencode
import (
	"bytes"
	"fmt"
	"io"
	"sort"
	"strconv"
	"time"
)
// An Encoder writes bencoded objects to an output stream.
type Encoder struct {
	w io.Writer
}

// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
	return &Encoder{w: w}
}

// Encode writes the bencoding of v to the stream.
func (e *Encoder) Encode(v interface{}) error {
	return marshal(e.w, v)
}

// Marshal returns the bencoding of v.
func Marshal(v interface{}) ([]byte, error) {
	var out bytes.Buffer
	err := marshal(&out, v)
	return out.Bytes(), err
}

// Marshaler is the interface implemented by objects that can marshal
// themselves.
type Marshaler interface {
	MarshalBencode() ([]byte, error)
}
// marshal writes the bencoding of data to w.
//
// Supported inputs are Marshaler implementations, byte slices, strings,
// string slices, the signed/unsigned integer types, time.Duration (encoded
// as whole seconds), maps with string keys, and slices of interface{} or
// Dict. Any other type yields an error; bencode has no floating-point
// representation, so floats are not supported.
func marshal(w io.Writer, data interface{}) (err error) {
	switch v := data.(type) {
	case Marshaler:
		// Custom marshalers take precedence over every built-in case.
		var bencoded []byte
		bencoded, err = v.MarshalBencode()
		if err != nil {
			return err
		}
		_, err = w.Write(bencoded)
	case []byte:
		err = marshalBytes(w, v)
	case string:
		err = marshalString(w, v)
	case []string:
		err = marshalStringSlice(w, v)
	case int:
		err = marshalInt(w, int64(v))
	case int8:
		// int8/uint8 previously fell through to the unsupported-type error;
		// they encode as ordinary bencode integers. Note uint8 == byte, so a
		// single byte encodes as an integer while []byte remains a string.
		err = marshalInt(w, int64(v))
	case int16:
		err = marshalInt(w, int64(v))
	case int32:
		err = marshalInt(w, int64(v))
	case int64:
		err = marshalInt(w, v)
	case uint:
		err = marshalUint(w, uint64(v))
	case uint8:
		err = marshalUint(w, uint64(v))
	case uint16:
		err = marshalUint(w, uint64(v))
	case uint32:
		err = marshalUint(w, uint64(v))
	case uint64:
		err = marshalUint(w, v)
	case time.Duration: // Assume seconds
		err = marshalInt(w, int64(v/time.Second))
	case map[string]interface{}:
		err = marshalMap(w, v)
	case []interface{}:
		err = marshalList(w, v)
	case []Dict:
		// Repackage as []interface{} so marshalList can handle it.
		interfaceSlice := make([]interface{}, len(v))
		for i, d := range v {
			interfaceSlice[i] = d
		}
		err = marshalList(w, interfaceSlice)
	default:
		return fmt.Errorf("attempted to marshal unsupported type:\n%T", v)
	}
	return err
}
func marshalInt(w io.Writer, v int64) error {
if _, err := w.Write([]byte{'i'}); err != nil {
return err
}
if _, err := w.Write([]byte(strconv.FormatInt(v, 10))); err != nil {
return err
}
_, err := w.Write([]byte{'e'})
return err
}
func marshalUint(w io.Writer, v uint64) error {
if _, err := w.Write([]byte{'i'}); err != nil {
return err
}
if _, err := w.Write([]byte(strconv.FormatUint(v, 10))); err != nil {
return err
}
_, err := w.Write([]byte{'e'})
return err
}
func marshalBytes(w io.Writer, v []byte) error {
if _, err := w.Write([]byte(strconv.Itoa(len(v)))); err != nil {
return err
}
if _, err := w.Write([]byte{':'}); err != nil {
return err
}
_, err := w.Write(v)
return err
}
// marshalString writes v as a bencode string ("<length>:<bytes>") by
// delegating to marshalBytes.
func marshalString(w io.Writer, v string) error {
	return marshalBytes(w, []byte(v))
}
// marshalStringSlice writes v as a bencode list of strings:
// 'l', each element's encoding, then 'e'.
func marshalStringSlice(w io.Writer, v []string) error {
	if _, err := w.Write([]byte{'l'}); err != nil {
		return err
	}
	for _, element := range v {
		if err := marshal(w, element); err != nil {
			return err
		}
	}
	if _, err := w.Write([]byte{'e'}); err != nil {
		return err
	}
	return nil
}
// marshalList writes v as a bencode list: 'l', each element's encoding, 'e'.
func marshalList(w io.Writer, v []interface{}) error {
	if _, err := w.Write([]byte{'l'}); err != nil {
		return err
	}
	for _, element := range v {
		if err := marshal(w, element); err != nil {
			return err
		}
	}
	if _, err := w.Write([]byte{'e'}); err != nil {
		return err
	}
	return nil
}
// marshalMap writes v as a bencode dictionary: 'd', then each key/value pair,
// then 'e'.
//
// Keys are emitted in sorted order: BEP 3 requires dictionary keys to appear
// sorted, and sorting also makes the output deterministic (Go map iteration
// order is randomized, so the original emitted a different byte sequence on
// every run).
func marshalMap(w io.Writer, v map[string]interface{}) error {
	if _, err := w.Write([]byte{'d'}); err != nil {
		return err
	}
	keys := make([]string, 0, len(v))
	for key := range v {
		keys = append(keys, key)
	}
	sort.Strings(keys)
	for _, key := range keys {
		if err := marshalString(w, key); err != nil {
			return err
		}
		if err := marshal(w, v[key]); err != nil {
			return err
		}
	}
	_, err := w.Write([]byte{'e'})
	return err
}

View file

@ -1,72 +0,0 @@
package bencode
import (
"bytes"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// marshalTests pairs each input value with the set of acceptable encodings.
// Map-backed inputs list every key-order permutation because Go map iteration
// order is unspecified.
var marshalTests = []struct {
	input    interface{}
	expected []string
}{
	{int(42), []string{"i42e"}},
	{int(-42), []string{"i-42e"}},
	{uint(43), []string{"i43e"}},
	{int64(44), []string{"i44e"}},
	{uint64(45), []string{"i45e"}},
	{int16(44), []string{"i44e"}},
	{uint16(45), []string{"i45e"}},
	{"example", []string{"7:example"}},
	{[]byte("example"), []string{"7:example"}},
	// Durations encode as whole seconds.
	{30 * time.Minute, []string{"i1800e"}},
	{[]string{"one", "two"}, []string{"l3:one3:twoe", "l3:two3:onee"}},
	{[]interface{}{"one", "two"}, []string{"l3:one3:twoe", "l3:two3:onee"}},
	{[]string{}, []string{"le"}},
	{map[string]interface{}{"one": "aa", "two": "bb"}, []string{"d3:one2:aa3:two2:bbe", "d3:two2:bb3:one2:aae"}},
	{map[string]interface{}{}, []string{"de"}},
	{[]Dict{{"a": "b"}, {"c": "d"}}, []string{"ld1:a1:bed1:c1:dee", "ld1:c1:ded1:a1:bee"}},
}

// TestMarshal checks that each input encodes to one of its permitted forms.
func TestMarshal(t *testing.T) {
	for _, tt := range marshalTests {
		t.Run(fmt.Sprintf("%#v", tt.input), func(t *testing.T) {
			got, err := Marshal(tt.input)
			require.Nil(t, err, "marshal should not fail")
			require.Contains(t, tt.expected, string(got), "the marshaled result should be one of the expected permutations")
		})
	}
}
// BenchmarkMarshalScalar measures encoding of small scalar values (a string
// and an int) through an Encoder backed by an in-memory buffer.
// NOTE(review): the buffer is never Reset, so it grows across iterations —
// confirm that allocation growth is intended to be part of what is measured.
func BenchmarkMarshalScalar(b *testing.B) {
	buf := &bytes.Buffer{}
	encoder := NewEncoder(buf)

	for i := 0; i < b.N; i++ {
		_ = encoder.Encode("test")
		_ = encoder.Encode(123)
	}
}

// BenchmarkMarshalLarge measures encoding of a nested map containing a string
// slice, ints, and a string. The buffer is likewise never Reset (see note
// above BenchmarkMarshalScalar's loop).
func BenchmarkMarshalLarge(b *testing.B) {
	data := map[string]interface{}{
		"k1": []string{"a", "b", "c"},
		"k2": 42,
		"k3": "val",
		"k4": uint(42),
	}

	buf := &bytes.Buffer{}
	encoder := NewEncoder(buf)

	for i := 0; i < b.N; i++ {
		_ = encoder.Encode(data)
	}
}

View file

@ -1,401 +0,0 @@
// Package http implements a BitTorrent frontend via the HTTP protocol as
// described in BEP 3 and BEP 23.
package http
import (
"context"
"crypto/tls"
"errors"
"net"
"net/http"
"time"
"github.com/julienschmidt/httprouter"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop"
)
// Config represents all of the configurable options for an HTTP BitTorrent
// Frontend.
type Config struct {
	// Addr is the listen address for plain HTTP; HTTPSAddr for TLS.
	// NewFrontend requires at least one of them to be set.
	Addr      string `yaml:"addr"`
	HTTPSAddr string `yaml:"https_addr"`

	// Timeouts applied to the underlying http.Server instance(s).
	ReadTimeout  time.Duration `yaml:"read_timeout"`
	WriteTimeout time.Duration `yaml:"write_timeout"`
	IdleTimeout  time.Duration `yaml:"idle_timeout"`

	// EnableKeepAlive toggles HTTP keep-alives on the server(s).
	EnableKeepAlive bool `yaml:"enable_keepalive"`

	// TLSCertPath and TLSKeyPath must both be set (together with HTTPSAddr)
	// to enable HTTPS.
	TLSCertPath string `yaml:"tls_cert_path"`
	TLSKeyPath  string `yaml:"tls_key_path"`

	// Router patterns registered for announce and scrape handlers; both
	// lists must be non-empty.
	AnnounceRoutes []string `yaml:"announce_routes"`
	ScrapeRoutes   []string `yaml:"scrape_routes"`

	// EnableRequestTiming records per-request durations in the response
	// duration metric.
	EnableRequestTiming bool `yaml:"enable_request_timing"`

	// ParseOptions is inlined so its YAML keys appear at this level.
	ParseOptions `yaml:",inline"`
}
// LogFields renders the current config as a set of Logrus fields.
func (cfg Config) LogFields() log.Fields {
	return log.Fields{
		"addr":                cfg.Addr,
		"httpsAddr":           cfg.HTTPSAddr,
		"readTimeout":         cfg.ReadTimeout,
		"writeTimeout":        cfg.WriteTimeout,
		"idleTimeout":         cfg.IdleTimeout,
		"enableKeepAlive":     cfg.EnableKeepAlive,
		"tlsCertPath":         cfg.TLSCertPath,
		"tlsKeyPath":          cfg.TLSKeyPath,
		"announceRoutes":      cfg.AnnounceRoutes,
		"scrapeRoutes":        cfg.ScrapeRoutes,
		"enableRequestTiming": cfg.EnableRequestTiming,
		// The fields below come from the embedded ParseOptions.
		"allowIPSpoofing":     cfg.AllowIPSpoofing,
		"realIPHeader":        cfg.RealIPHeader,
		"maxNumWant":          cfg.MaxNumWant,
		"defaultNumWant":      cfg.DefaultNumWant,
		"maxScrapeInfoHashes": cfg.MaxScrapeInfoHashes,
	}
}
// Default config constants, applied by Validate when the corresponding
// configured value is zero or negative.
const (
	defaultReadTimeout  = 2 * time.Second
	defaultWriteTimeout = 2 * time.Second
	defaultIdleTimeout  = 30 * time.Second
)
// Validate sanity checks values set in a config and returns a new config with
// default values replacing anything that is invalid.
//
// This function warns to the logger when a value is changed.
func (cfg Config) Validate() Config {
	validcfg := cfg

	// warnFallback logs that a provided value was replaced by its default.
	warnFallback := func(name string, provided, fallback interface{}) {
		log.Warn("falling back to default configuration", log.Fields{
			"name":     name,
			"provided": provided,
			"default":  fallback,
		})
	}

	if cfg.ReadTimeout <= 0 {
		validcfg.ReadTimeout = defaultReadTimeout
		warnFallback("http.ReadTimeout", cfg.ReadTimeout, validcfg.ReadTimeout)
	}

	if cfg.WriteTimeout <= 0 {
		validcfg.WriteTimeout = defaultWriteTimeout
		warnFallback("http.WriteTimeout", cfg.WriteTimeout, validcfg.WriteTimeout)
	}

	if cfg.IdleTimeout <= 0 {
		validcfg.IdleTimeout = defaultIdleTimeout
		if cfg.EnableKeepAlive {
			// When keepalive is disabled this value is unused, so warning
			// about the fallback would only be noise.
			warnFallback("http.IdleTimeout", cfg.IdleTimeout, validcfg.IdleTimeout)
		}
	}

	if cfg.MaxNumWant <= 0 {
		validcfg.MaxNumWant = defaultMaxNumWant
		warnFallback("http.MaxNumWant", cfg.MaxNumWant, validcfg.MaxNumWant)
	}

	if cfg.DefaultNumWant <= 0 {
		validcfg.DefaultNumWant = defaultDefaultNumWant
		warnFallback("http.DefaultNumWant", cfg.DefaultNumWant, validcfg.DefaultNumWant)
	}

	if cfg.MaxScrapeInfoHashes <= 0 {
		validcfg.MaxScrapeInfoHashes = defaultMaxScrapeInfoHashes
		warnFallback("http.MaxScrapeInfoHashes", cfg.MaxScrapeInfoHashes, validcfg.MaxScrapeInfoHashes)
	}

	return validcfg
}
// Frontend represents the state of an HTTP BitTorrent Frontend.
type Frontend struct {
	srv    *http.Server // plain-HTTP server; nil until serveHTTP runs
	tlsSrv *http.Server // HTTPS server; nil until serveHTTPS runs
	tlsCfg *tls.Config  // non-nil only when TLS cert/key paths are configured

	logic frontend.TrackerLogic
	Config
}
// NewFrontend creates a new instance of an HTTP Frontend that asynchronously
// serves requests.
//
// It validates the config, binds the configured listener(s), and then starts
// serving on background goroutines; an error serving after startup is fatal
// (log.Fatal).
func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error) {
	cfg := provided.Validate()

	f := &Frontend{
		logic:  logic,
		Config: cfg,
	}

	// At least one of the plain-HTTP and HTTPS listen addresses is required.
	if cfg.Addr == "" && cfg.HTTPSAddr == "" {
		return nil, errors.New("must specify addr or https_addr or both")
	}

	if len(cfg.AnnounceRoutes) < 1 || len(cfg.ScrapeRoutes) < 1 {
		return nil, errors.New("must specify routes")
	}

	// If TLS is enabled, create a key pair.
	if cfg.TLSCertPath != "" && cfg.TLSKeyPath != "" {
		var err error
		f.tlsCfg = &tls.Config{
			MinVersion:   tls.VersionTLS12,
			Certificates: make([]tls.Certificate, 1),
		}
		f.tlsCfg.Certificates[0], err = tls.LoadX509KeyPair(cfg.TLSCertPath, cfg.TLSKeyPath)
		if err != nil {
			return nil, err
		}
	}

	// TLS material and the HTTPS address must be configured together.
	if cfg.HTTPSAddr != "" && f.tlsCfg == nil {
		return nil, errors.New("must specify tls_cert_path and tls_key_path when using https_addr")
	}
	if cfg.HTTPSAddr == "" && f.tlsCfg != nil {
		return nil, errors.New("must specify https_addr when using tls_cert_path and tls_key_path")
	}

	var listenerHTTP, listenerHTTPS net.Listener
	var err error
	// Bind both listeners before serving so a failure to bind surfaces here
	// as an error rather than inside a goroutine.
	if cfg.Addr != "" {
		listenerHTTP, err = net.Listen("tcp", f.Addr)
		if err != nil {
			return nil, err
		}
	}
	if cfg.HTTPSAddr != "" {
		listenerHTTPS, err = net.Listen("tcp", f.HTTPSAddr)
		if err != nil {
			// Avoid leaking the already-bound HTTP listener.
			if listenerHTTP != nil {
				listenerHTTP.Close()
			}
			return nil, err
		}
	}

	if cfg.Addr != "" {
		go func() {
			// serveHTTP blocks until Stop() or a fatal serving error.
			if err := f.serveHTTP(listenerHTTP); err != nil {
				log.Fatal("failed while serving http", log.Err(err))
			}
		}()
	}

	if cfg.HTTPSAddr != "" {
		go func() {
			if err := f.serveHTTPS(listenerHTTPS); err != nil {
				log.Fatal("failed while serving https", log.Err(err))
			}
		}()
	}

	return f, nil
}
// Stop provides a thread-safe way to shutdown a currently running Frontend.
func (f *Frontend) Stop() stop.Result {
	group := stop.NewGroup()
	for _, srv := range []*http.Server{f.srv, f.tlsSrv} {
		if srv != nil {
			group.AddFunc(f.makeStopFunc(srv))
		}
	}
	return group.Stop()
}
// makeStopFunc wraps an http.Server's graceful Shutdown in a stop.Func so it
// can participate in the stop.Group built by Stop.
func (f *Frontend) makeStopFunc(stopSrv *http.Server) stop.Func {
	return func() stop.Result {
		c := make(stop.Channel)
		go func() {
			// Shutdown with a background context waits indefinitely for
			// in-flight requests to complete.
			c.Done(stopSrv.Shutdown(context.Background()))
		}()
		return c.Result()
	}
}
// handler builds the HTTP router, registering every configured announce and
// scrape route as a GET endpoint.
func (f *Frontend) handler() http.Handler {
	router := httprouter.New()
	registrations := []struct {
		paths  []string
		handle httprouter.Handle
	}{
		{f.AnnounceRoutes, f.announceRoute},
		{f.ScrapeRoutes, f.scrapeRoute},
	}
	for _, reg := range registrations {
		for _, path := range reg.paths {
			router.GET(path, reg.handle)
		}
	}
	return router
}
// serveHTTP blocks while listening and serving non-TLS HTTP BitTorrent
// requests until Stop() is called or an error is returned.
func (f *Frontend) serveHTTP(l net.Listener) error {
	f.srv = &http.Server{
		Addr:         f.Addr,
		Handler:      f.handler(),
		ReadTimeout:  f.ReadTimeout,
		WriteTimeout: f.WriteTimeout,
		IdleTimeout:  f.IdleTimeout,
	}

	f.srv.SetKeepAlivesEnabled(f.EnableKeepAlive)

	// Start the HTTP server.
	// http.ErrServerClosed signals a clean shutdown via Stop(), not a
	// failure, so it is deliberately not propagated.
	if err := f.srv.Serve(l); !errors.Is(err, http.ErrServerClosed) {
		return err
	}

	return nil
}
// serveHTTPS blocks while listening and serving TLS HTTP BitTorrent
// requests until Stop() is called or an error is returned.
func (f *Frontend) serveHTTPS(l net.Listener) error {
	f.tlsSrv = &http.Server{
		Addr:         f.HTTPSAddr,
		TLSConfig:    f.tlsCfg,
		Handler:      f.handler(),
		ReadTimeout:  f.ReadTimeout,
		WriteTimeout: f.WriteTimeout,
		// Match serveHTTP: previously IdleTimeout was omitted here, so the
		// TLS server silently fell back to ReadTimeout for keep-alive
		// connections (net/http uses ReadTimeout when IdleTimeout is zero).
		IdleTimeout: f.IdleTimeout,
	}

	f.tlsSrv.SetKeepAlivesEnabled(f.EnableKeepAlive)

	// Start the HTTP server.
	// http.ErrServerClosed signals a clean shutdown via Stop(), not a
	// failure, so it is deliberately not propagated.
	if err := f.tlsSrv.ServeTLS(l, "", ""); !errors.Is(err, http.ErrServerClosed) {
		return err
	}

	return nil
}
// injectRouteParamsToContext copies httprouter path parameters into a
// bittorrent.RouteParams value stored on the returned context under
// bittorrent.RouteParamsKey.
func injectRouteParamsToContext(ctx context.Context, ps httprouter.Params) context.Context {
	routeParams := make(bittorrent.RouteParams, 0, len(ps))
	for _, param := range ps {
		routeParams = append(routeParams, bittorrent.RouteParam{Key: param.Key, Value: param.Value})
	}
	return context.WithValue(ctx, bittorrent.RouteParamsKey, routeParams)
}
// announceRoute parses and responds to an Announce.
func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	var err error
	var start time.Time
	if f.EnableRequestTiming {
		start = time.Now()
	}
	var af *bittorrent.AddressFamily
	// Record the response-duration metric on every exit path. af remains nil
	// for requests that fail before parsing determines the address family.
	defer func() {
		if f.EnableRequestTiming {
			recordResponseDuration("announce", af, err, time.Since(start))
		} else {
			recordResponseDuration("announce", af, err, time.Duration(0))
		}
	}()

	req, err := ParseAnnounce(r, f.ParseOptions)
	if err != nil {
		_ = WriteError(w, err)
		return
	}
	af = new(bittorrent.AddressFamily)
	*af = req.IP.AddressFamily

	ctx := injectRouteParamsToContext(context.Background(), ps)
	ctx, resp, err := f.logic.HandleAnnounce(ctx, req)
	if err != nil {
		_ = WriteError(w, err)
		return
	}

	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	err = WriteAnnounceResponse(w, resp)
	if err != nil {
		_ = WriteError(w, err)
		return
	}

	// PostHooks run asynchronously, after the response has been written.
	go f.logic.AfterAnnounce(ctx, req, resp)
}
// scrapeRoute parses and responds to a Scrape.
func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
	var err error
	var start time.Time
	if f.EnableRequestTiming {
		start = time.Now()
	}
	var af *bittorrent.AddressFamily
	// Record the response-duration metric on every exit path. af remains nil
	// for requests that fail before the address family is determined.
	defer func() {
		if f.EnableRequestTiming {
			recordResponseDuration("scrape", af, err, time.Since(start))
		} else {
			recordResponseDuration("scrape", af, err, time.Duration(0))
		}
	}()

	req, err := ParseScrape(r, f.ParseOptions)
	if err != nil {
		_ = WriteError(w, err)
		return
	}

	// Derive the address family from the connection's remote address.
	host, _, err := net.SplitHostPort(r.RemoteAddr)
	if err != nil {
		log.Error("http: unable to determine remote address for scrape", log.Err(err))
		_ = WriteError(w, err)
		return
	}

	reqIP := net.ParseIP(host)
	if reqIP.To4() != nil {
		req.AddressFamily = bittorrent.IPv4
	} else if len(reqIP) == net.IPv6len { // implies reqIP.To4() == nil
		req.AddressFamily = bittorrent.IPv6
	} else {
		log.Error("http: invalid IP: neither v4 nor v6", log.Fields{"RemoteAddr": r.RemoteAddr})
		_ = WriteError(w, bittorrent.ErrInvalidIP)
		return
	}
	af = new(bittorrent.AddressFamily)
	*af = req.AddressFamily

	ctx := injectRouteParamsToContext(context.Background(), ps)
	ctx, resp, err := f.logic.HandleScrape(ctx, req)
	if err != nil {
		_ = WriteError(w, err)
		return
	}

	w.Header().Set("Content-Type", "text/plain; charset=utf-8")
	err = WriteScrapeResponse(w, resp)
	if err != nil {
		_ = WriteError(w, err)
		return
	}

	// PostHooks run asynchronously, after the response has been written.
	go f.logic.AfterScrape(ctx, req, resp)
}

View file

@ -1,171 +0,0 @@
package http
import (
"errors"
"net"
"net/http"
"github.com/chihaya/chihaya/bittorrent"
)
// ParseOptions is the configuration used to parse an Announce Request.
//
// If AllowIPSpoofing is true, IPs provided via BitTorrent params will be used.
// If RealIPHeader is not empty string, the value of the first HTTP Header with
// that name will be used.
type ParseOptions struct {
AllowIPSpoofing bool `yaml:"allow_ip_spoofing"`
RealIPHeader string `yaml:"real_ip_header"`
// MaxNumWant caps how many peers a client may request in one announce.
MaxNumWant uint32 `yaml:"max_numwant"`
// DefaultNumWant is the peer count used when a client omits numwant.
DefaultNumWant uint32 `yaml:"default_numwant"`
// MaxScrapeInfoHashes caps how many infohashes one scrape may query.
MaxScrapeInfoHashes uint32 `yaml:"max_scrape_infohashes"`
}
// Default parser config constants.
// These back-fill the corresponding ParseOptions fields when a config leaves
// them unset (see the frontends' Validate methods).
const (
defaultMaxNumWant = 100
defaultDefaultNumWant = 50
defaultMaxScrapeInfoHashes = 50
)
// ParseAnnounce parses an bittorrent.AnnounceRequest from an http.Request.
//
// Every malformed or missing mandatory parameter is reported as a
// bittorrent.ClientError so the frontend can bencode it back to the client.
// The returned request has already been sanitized against opts' numwant caps.
func ParseAnnounce(r *http.Request, opts ParseOptions) (*bittorrent.AnnounceRequest, error) {
qp, err := bittorrent.ParseURLData(r.RequestURI)
if err != nil {
return nil, err
}
request := &bittorrent.AnnounceRequest{Params: qp}
// Attempt to parse the event from the request.
var eventStr string
eventStr, request.EventProvided = qp.String("event")
if request.EventProvided {
request.Event, err = bittorrent.NewEvent(eventStr)
if err != nil {
return nil, bittorrent.ClientError("failed to provide valid client event")
}
} else {
request.Event = bittorrent.None
}
// Determine if the client expects a compact response.
compactStr, _ := qp.String("compact")
request.Compact = compactStr != "" && compactStr != "0"
// Parse the infohash from the request.
// Announces must carry exactly one info_hash (unlike scrapes).
infoHashes := qp.InfoHashes()
if len(infoHashes) < 1 {
return nil, bittorrent.ClientError("no info_hash parameter supplied")
}
if len(infoHashes) > 1 {
return nil, bittorrent.ClientError("multiple info_hash parameters supplied")
}
request.InfoHash = infoHashes[0]
// Parse the PeerID from the request.
peerID, ok := qp.String("peer_id")
if !ok {
return nil, bittorrent.ClientError("failed to parse parameter: peer_id")
}
if len(peerID) != 20 {
return nil, bittorrent.ClientError("failed to provide valid peer_id")
}
request.Peer.ID = bittorrent.PeerIDFromString(peerID)
// Determine the number of remaining bytes for the client.
request.Left, err = qp.Uint("left", 64)
if err != nil {
return nil, bittorrent.ClientError("failed to parse parameter: left")
}
// Determine the number of bytes downloaded by the client.
request.Downloaded, err = qp.Uint("downloaded", 64)
if err != nil {
return nil, bittorrent.ClientError("failed to parse parameter: downloaded")
}
// Determine the number of bytes shared by the client.
request.Uploaded, err = qp.Uint("uploaded", 64)
if err != nil {
return nil, bittorrent.ClientError("failed to parse parameter: uploaded")
}
// Determine the number of peers the client wants in the response.
// numwant is optional: ErrKeyNotFound is tolerated, any other parse error
// is rejected.
numwant, err := qp.Uint("numwant", 32)
if err != nil && !errors.Is(err, bittorrent.ErrKeyNotFound) {
return nil, bittorrent.ClientError("failed to parse parameter: numwant")
}
// If there were no errors, the user actually provided the numwant.
request.NumWantProvided = err == nil
request.NumWant = uint32(numwant)
// Parse the port where the client is listening.
port, err := qp.Uint("port", 16)
if err != nil {
return nil, bittorrent.ClientError("failed to parse parameter: port")
}
request.Peer.Port = uint16(port)
// Parse the IP address where the client is listening.
request.Peer.IP.IP, request.IPProvided = requestedIP(r, qp, opts)
if request.Peer.IP.IP == nil {
return nil, bittorrent.ClientError("failed to parse peer IP address")
}
if err := bittorrent.SanitizeAnnounce(request, opts.MaxNumWant, opts.DefaultNumWant); err != nil {
return nil, err
}
return request, nil
}
// ParseScrape parses an bittorrent.ScrapeRequest from an http.Request.
// A scrape may carry multiple info_hash parameters but must carry at least
// one; the result is sanitized against opts.MaxScrapeInfoHashes.
func ParseScrape(r *http.Request, opts ParseOptions) (*bittorrent.ScrapeRequest, error) {
	params, err := bittorrent.ParseURLData(r.RequestURI)
	if err != nil {
		return nil, err
	}

	hashes := params.InfoHashes()
	if len(hashes) < 1 {
		return nil, bittorrent.ClientError("no info_hash parameter supplied")
	}

	req := &bittorrent.ScrapeRequest{
		InfoHashes: hashes,
		Params:     params,
	}
	if err := bittorrent.SanitizeScrape(req, opts.MaxScrapeInfoHashes); err != nil {
		return nil, err
	}
	return req, nil
}
// requestedIP determines the IP address for a BitTorrent client request.
//
// Client-supplied addresses ("ip", "ipv4", "ipv6" params) are honored only
// when opts.AllowIPSpoofing is set (provided=true). Otherwise the configured
// RealIPHeader is consulted, and finally the connection's remote address.
func requestedIP(r *http.Request, p bittorrent.Params, opts ParseOptions) (ip net.IP, provided bool) {
	if opts.AllowIPSpoofing {
		for _, key := range []string{"ip", "ipv4", "ipv6"} {
			if value, ok := p.String(key); ok {
				return net.ParseIP(value), true
			}
		}
	}

	if opts.RealIPHeader != "" {
		if header := r.Header.Get(opts.RealIPHeader); header != "" {
			return net.ParseIP(header), false
		}
	}

	host, _, _ := net.SplitHostPort(r.RemoteAddr)
	return net.ParseIP(host), false
}

View file

@ -1,50 +0,0 @@
package http
import (
"errors"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/chihaya/chihaya/bittorrent"
)
func init() {
// Register the histogram with the default Prometheus registry at package
// load so every HTTP frontend reports into the same collector.
prometheus.MustRegister(promResponseDurationMilliseconds)
}
// promResponseDurationMilliseconds measures response latency, labeled by
// action ("announce"/"scrape"), address family, and error string.
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
prometheus.HistogramOpts{
Name: "chihaya_http_response_duration_milliseconds",
Help: "The duration of time it takes to receive and write a response to an API request",
// Exponential buckets from 9.375ms doubling 10 times (~9ms to ~4.8s).
Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
},
[]string{"action", "address_family", "error"},
)
// recordResponseDuration records the duration of time to respond to a Request
// in milliseconds.
//
// Client errors are recorded verbatim as the "error" label; any other error
// collapses to "internal error" to keep label cardinality bounded.
func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
	var errString string
	if err != nil {
		var clientErr bittorrent.ClientError
		if errors.As(err, &clientErr) {
			errString = clientErr.Error()
		} else {
			errString = "internal error"
		}
	}

	// A nil af means the family was never determined; any other unmatched
	// value intentionally leaves the label empty, as before.
	var afString string
	switch {
	case af == nil:
		afString = "Unknown"
	case *af == bittorrent.IPv4:
		afString = "IPv4"
	case *af == bittorrent.IPv6:
		afString = "IPv6"
	}

	promResponseDurationMilliseconds.
		WithLabelValues(action, afString, errString).
		Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}

View file

@ -1,118 +0,0 @@
package http
import (
"errors"
"net/http"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend/http/bencode"
"github.com/chihaya/chihaya/pkg/log"
)
// WriteError communicates an error to a BitTorrent client over HTTP.
//
// Per tracker convention the failure is delivered with status 200 and a
// bencoded "failure reason" body; non-client errors are logged and masked.
func WriteError(w http.ResponseWriter, err error) error {
	var clientErr bittorrent.ClientError
	message := "internal server error"
	if errors.As(err, &clientErr) {
		message = clientErr.Error()
	} else {
		log.Error("http: internal error", log.Err(err))
	}

	w.WriteHeader(http.StatusOK)
	return bencode.NewEncoder(w).Encode(bencode.Dict{"failure reason": message})
}
// WriteAnnounceResponse communicates the results of an Announce to a
// BitTorrent client over HTTP.
//
// Compact responses encode peers as packed byte strings under "peers"
// (IPv4) and "peers6" (IPv6); non-compact responses use a single list of
// peer dictionaries under "peers".
func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceResponse) error {
	bdict := bencode.Dict{
		"complete":     resp.Complete,
		"incomplete":   resp.Incomplete,
		"interval":     resp.Interval,
		"min interval": resp.MinInterval,
	}

	if resp.Compact {
		var v4Packed, v6Packed []byte
		for _, peer := range resp.IPv4Peers {
			v4Packed = append(v4Packed, compact4(peer)...)
		}
		for _, peer := range resp.IPv6Peers {
			v6Packed = append(v6Packed, compact6(peer)...)
		}
		// Empty keys are omitted entirely rather than encoded as "".
		if len(v4Packed) > 0 {
			bdict["peers"] = v4Packed
		}
		if len(v6Packed) > 0 {
			bdict["peers6"] = v6Packed
		}
		return bencode.NewEncoder(w).Encode(bdict)
	}

	peerDicts := make([]bencode.Dict, 0, len(resp.IPv4Peers)+len(resp.IPv6Peers))
	for _, peer := range resp.IPv4Peers {
		peerDicts = append(peerDicts, dict(peer))
	}
	for _, peer := range resp.IPv6Peers {
		peerDicts = append(peerDicts, dict(peer))
	}
	bdict["peers"] = peerDicts
	return bencode.NewEncoder(w).Encode(bdict)
}
// WriteScrapeResponse communicates the results of a Scrape to a BitTorrent
// client over HTTP.
// Each scraped infohash becomes a key of the "files" dictionary mapping to
// its seeder/leecher counts.
func WriteScrapeResponse(w http.ResponseWriter, resp *bittorrent.ScrapeResponse) error {
	files := bencode.NewDict()
	for _, scrape := range resp.Files {
		entry := bencode.Dict{
			"complete":   scrape.Complete,
			"incomplete": scrape.Incomplete,
		}
		files[string(scrape.InfoHash[:])] = entry
	}

	return bencode.NewEncoder(w).Encode(bencode.Dict{"files": files})
}
// compact4 encodes a peer as its 4 IPv4 bytes followed by a big-endian port.
// It panics if the peer's IP cannot be represented as IPv4, since only
// IPv4Peers should ever reach it.
func compact4(peer bittorrent.Peer) (buf []byte) {
	ip := peer.IP.To4()
	if ip == nil {
		panic("non-IPv4 IP for Peer in IPv4Peers")
	}
	buf = []byte(ip)
	buf = append(buf, byte(peer.Port>>8), byte(peer.Port&0xff))
	return
}
// compact6 encodes a peer as its 16 IPv6 bytes followed by a big-endian port.
// It panics if the peer's IP cannot be represented as 16 bytes, since only
// IPv6Peers should ever reach it.
func compact6(peer bittorrent.Peer) (buf []byte) {
	ip := peer.IP.To16()
	if ip == nil {
		panic("non-IPv6 IP for Peer in IPv6Peers")
	}
	buf = []byte(ip)
	buf = append(buf, byte(peer.Port>>8), byte(peer.Port&0xff))
	return
}
// dict renders a peer in the non-compact bencoded dictionary form.
func dict(peer bittorrent.Peer) bencode.Dict {
	d := bencode.NewDict()
	d["peer id"] = string(peer.ID[:])
	d["ip"] = peer.IP.String()
	d["port"] = peer.Port
	return d
}

View file

@ -1,46 +0,0 @@
package http
import (
"fmt"
"net/http/httptest"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
// TestWriteError checks that client errors are bencoded as a
// "failure reason" dictionary exactly as BitTorrent clients expect.
func TestWriteError(t *testing.T) {
table := []struct {
reason, expected string
}{
{"hello world", "d14:failure reason11:hello worlde"},
{"what's up", "d14:failure reason9:what's upe"},
}
for _, tt := range table {
t.Run(fmt.Sprintf("%s expecting %s", tt.reason, tt.expected), func(t *testing.T) {
r := httptest.NewRecorder()
err := WriteError(r, bittorrent.ClientError(tt.reason))
require.Nil(t, err)
require.Equal(t, r.Body.String(), tt.expected)
})
}
}
// TestWriteStatus exercises WriteError with another failure message.
// NOTE(review): despite its name this duplicates TestWriteError's coverage of
// WriteError; there is no separate WriteStatus function in this package.
func TestWriteStatus(t *testing.T) {
table := []struct {
reason, expected string
}{
{"something is missing", "d14:failure reason20:something is missinge"},
}
for _, tt := range table {
t.Run(fmt.Sprintf("%s expecting %s", tt.reason, tt.expected), func(t *testing.T) {
r := httptest.NewRecorder()
err := WriteError(r, bittorrent.ClientError(tt.reason))
require.Nil(t, err)
require.Equal(t, r.Body.String(), tt.expected)
})
}
}

View file

@ -1,37 +0,0 @@
package bytepool
import "sync"
// BytePool is a cached pool of reusable byte slices.
type BytePool struct {
	sync.Pool
}

// New allocates a new BytePool whose slices all start with the given length
// and capacity.
func New(length int) *BytePool {
	pool := &BytePool{}
	pool.Pool.New = func() interface{} {
		buf := make([]byte, length)
		return &buf
	}
	return pool
}

// Get returns a byte slice from the pool.
func (bp *BytePool) Get() *[]byte {
	return bp.Pool.Get().(*[]byte)
}

// Put returns a byte slice to the pool, restoring its full capacity and
// zeroing its contents so stale data never leaks to the next caller.
func (bp *BytePool) Put(b *[]byte) {
	*b = (*b)[:cap(*b)]
	// This loop shape is recognized and compiled down to memclr:
	// https://github.com/golang/go/issues/5373.
	for i := range *b {
		(*b)[i] = 0
	}
	bp.Pool.Put(b)
}

View file

@ -1,116 +0,0 @@
package udp
import (
"crypto/hmac"
"encoding/binary"
"hash"
"net"
"time"
sha256 "github.com/minio/sha256-simd"
"github.com/chihaya/chihaya/pkg/log"
)
// ttl is the duration a connection ID should be valid according to BEP 15.
const ttl = 2 * time.Minute
// NewConnectionID creates an 8-byte connection identifier for UDP packets as
// described by BEP 15.
// This is a wrapper around creating a new ConnectionIDGenerator and generating
// an ID. It is recommended to use the generator for performance.
func NewConnectionID(ip net.IP, now time.Time, key string) []byte {
	gen := NewConnectionIDGenerator(key)
	return gen.Generate(ip, now)
}
// ValidConnectionID determines whether a connection identifier is legitimate.
// This is a wrapper around creating a new ConnectionIDGenerator and validating
// the ID. It is recommended to use the generator for performance.
func ValidConnectionID(connectionID []byte, ip net.IP, now time.Time, maxClockSkew time.Duration, key string) bool {
	gen := NewConnectionIDGenerator(key)
	return gen.Validate(connectionID, ip, now, maxClockSkew)
}
// A ConnectionIDGenerator is a reusable generator and validator for connection
// IDs as described in BEP 15.
// It is not thread safe, but is safe to be pooled and reused by other
// goroutines. It manages its state itself, so it can be taken from and returned
// to a pool without any cleanup.
// After initial creation, it can generate connection IDs without allocating.
// See Generate and Validate for usage notes and guarantees.
type ConnectionIDGenerator struct {
// mac is a keyed HMAC that can be reused for subsequent connection ID
// generations.
// It is keyed with the tracker's private key (see NewConnectionIDGenerator).
mac hash.Hash
// connID is an 8-byte slice that holds the generated connection ID after a
// call to Generate.
// It must not be referenced after the generator is returned to a pool.
// It will be overwritten by subsequent calls to Generate.
connID []byte
// scratch is a 32-byte slice that is used as a scratchpad for the generated
// HMACs.
// 32 bytes matches the SHA-256 digest size used by mac.
scratch []byte
}
// NewConnectionIDGenerator creates a new connection ID generator keyed with
// the given private key; the buffers are sized so that later Generate calls
// do not allocate.
func NewConnectionIDGenerator(key string) *ConnectionIDGenerator {
	gen := &ConnectionIDGenerator{}
	gen.mac = hmac.New(sha256.New, []byte(key))
	gen.connID = make([]byte, 8)
	gen.scratch = make([]byte, 32)
	return gen
}
// reset resets the generator.
// This is called by other methods of the generator, it's not necessary to call
// it after getting a generator from a pool.
func (g *ConnectionIDGenerator) reset() {
g.mac.Reset()
// connID keeps its full 8-byte length; scratch is emptied (length 0) so
// that mac.Sum can append the digest into its existing capacity.
g.connID = g.connID[:8]
g.scratch = g.scratch[:0]
}
// Generate generates an 8-byte connection ID as described in BEP 15 for the
// given IP and the current time.
//
// The first 4 bytes of the connection identifier is a unix timestamp and the
// last 4 bytes are a truncated HMAC token created from the aforementioned
// unix timestamp and the source IP address of the UDP packet.
//
// Truncated HMAC is known to be safe for 2^(-n) where n is the size in bits
// of the truncated HMAC token. In this use case we have 32 bits, thus a
// forgery probability of approximately 1 in 4 billion.
//
// The generated ID is written to g.connID, which is also returned. g.connID
// will be reused, so it must not be referenced after returning the generator
// to a pool and will be overwritten be subsequent calls to Generate!
func (g *ConnectionIDGenerator) Generate(ip net.IP, now time.Time) []byte {
g.reset()
// Bytes 0-3: big-endian unix timestamp.
binary.BigEndian.PutUint32(g.connID, uint32(now.Unix()))
// Bytes 4-7: HMAC(timestamp || IP) truncated to 4 bytes.
g.mac.Write(g.connID[:4])
g.mac.Write(ip)
g.scratch = g.mac.Sum(g.scratch)
copy(g.connID[4:8], g.scratch[:4])
log.Debug("generated connection ID", log.Fields{"ip": ip, "now": now, "connID": g.connID})
return g.connID
}
// Validate validates the given connection ID for an IP and the current time.
//
// A connection ID is valid when it is exactly 8 bytes long, was issued within
// ttl (2 minutes per BEP 15), is not from further in the future than
// maxClockSkew, and carries an HMAC matching this generator's key and the
// given IP.
func (g *ConnectionIDGenerator) Validate(connectionID []byte, ip net.IP, now time.Time, maxClockSkew time.Duration) bool {
	// Reject malformed IDs outright: the slicing below assumes 8 bytes and
	// would otherwise panic on attacker-controlled input shorter than 4
	// bytes. (IDs of the wrong length could never validate anyway, since
	// hmac.Equal fails on mismatched lengths.)
	if len(connectionID) != 8 {
		return false
	}
	ts := time.Unix(int64(binary.BigEndian.Uint32(connectionID[:4])), 0)
	log.Debug("validating connection ID", log.Fields{"connID": connectionID, "ip": ip, "ts": ts, "now": now})
	// Reject IDs that have expired or claim to come from too far in the future.
	if now.After(ts.Add(ttl)) || ts.After(now.Add(maxClockSkew)) {
		return false
	}
	g.reset()
	// Recompute HMAC(timestamp || IP) and compare in constant time.
	g.mac.Write(connectionID[:4])
	g.mac.Write(ip)
	g.scratch = g.mac.Sum(g.scratch)
	return hmac.Equal(g.scratch[:4], connectionID[4:])
}

View file

@ -1,193 +0,0 @@
package udp
import (
"crypto/hmac"
"encoding/binary"
"fmt"
"net"
"sync"
"testing"
"time"
sha256 "github.com/minio/sha256-simd"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/pkg/log"
)
// golden is a table of connection-ID scenarios shared by the tests below:
// when the ID was created, when it is checked, the client IP, the HMAC key,
// and whether validation should succeed under a one-minute clock skew.
var golden = []struct {
createdAt int64
now int64
ip string
key string
valid bool
}{
{0, 1, "127.0.0.1", "", true},
{0, 420420, "127.0.0.1", "", false},
{0, 0, "[::]", "", true},
}
// simpleNewConnectionID generates a new connection ID the explicit way.
// This is used to verify correct behaviour of the generator.
func simpleNewConnectionID(ip net.IP, now time.Time, key string) []byte {
buf := make([]byte, 8)
binary.BigEndian.PutUint32(buf, uint32(now.Unix()))
mac := hmac.New(sha256.New, []byte(key))
mac.Write(buf[:4])
mac.Write(ip)
macBytes := mac.Sum(nil)[:4]
copy(buf[4:], macBytes)
// this is just in here because logging impacts performance and we benchmark
// this version too.
log.Debug("manually generated connection ID", log.Fields{"ip": ip, "now": now, "connID": buf})
return buf
}
// TestVerification checks ValidConnectionID against the golden table with a
// one-minute allowed clock skew.
func TestVerification(t *testing.T) {
for _, tt := range golden {
t.Run(fmt.Sprintf("%s created at %d verified at %d", tt.ip, tt.createdAt, tt.now), func(t *testing.T) {
cid := NewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key)
got := ValidConnectionID(cid, net.ParseIP(tt.ip), time.Unix(tt.now, 0), time.Minute, tt.key)
if got != tt.valid {
t.Errorf("expected validity: %t got validity: %t", tt.valid, got)
}
})
}
}
// TestGeneration checks that the generator produces byte-identical IDs to the
// explicit reference implementation, simpleNewConnectionID.
func TestGeneration(t *testing.T) {
for _, tt := range golden {
t.Run(fmt.Sprintf("%s created at %d", tt.ip, tt.createdAt), func(t *testing.T) {
want := simpleNewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key)
got := NewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key)
require.Equal(t, want, got)
})
}
}
// TestReuseGeneratorGenerate checks that repeated Generate calls on one
// generator keep producing the same ID for the same inputs (state is reset
// correctly between calls).
func TestReuseGeneratorGenerate(t *testing.T) {
for _, tt := range golden {
t.Run(fmt.Sprintf("%s created at %d", tt.ip, tt.createdAt), func(t *testing.T) {
cid := NewConnectionID(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0), tt.key)
require.Len(t, cid, 8)
gen := NewConnectionIDGenerator(tt.key)
for i := 0; i < 3; i++ {
connID := gen.Generate(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0))
require.Equal(t, cid, connID)
}
})
}
}
// TestReuseGeneratorValidate checks that repeated Validate calls on one
// generator keep returning the expected result (state is reset correctly
// between calls).
func TestReuseGeneratorValidate(t *testing.T) {
for _, tt := range golden {
t.Run(fmt.Sprintf("%s created at %d verified at %d", tt.ip, tt.createdAt, tt.now), func(t *testing.T) {
gen := NewConnectionIDGenerator(tt.key)
cid := gen.Generate(net.ParseIP(tt.ip), time.Unix(tt.createdAt, 0))
for i := 0; i < 3; i++ {
got := gen.Validate(cid, net.ParseIP(tt.ip), time.Unix(tt.now, 0), time.Minute)
if got != tt.valid {
t.Errorf("expected validity: %t got validity: %t", tt.valid, got)
}
}
})
}
}
// BenchmarkSimpleNewConnectionID measures the explicit, allocating reference
// implementation; sum prevents the compiler from eliding the call.
func BenchmarkSimpleNewConnectionID(b *testing.B) {
ip := net.ParseIP("127.0.0.1")
key := "some random string that is hopefully at least this long"
createdAt := time.Now()
b.RunParallel(func(pb *testing.PB) {
sum := int64(0)
for pb.Next() {
cid := simpleNewConnectionID(ip, createdAt, key)
sum += int64(cid[7])
}
_ = sum
})
}
// BenchmarkNewConnectionID measures the convenience wrapper, which allocates
// a fresh generator per call.
func BenchmarkNewConnectionID(b *testing.B) {
ip := net.ParseIP("127.0.0.1")
key := "some random string that is hopefully at least this long"
createdAt := time.Now()
b.RunParallel(func(pb *testing.PB) {
sum := int64(0)
for pb.Next() {
cid := NewConnectionID(ip, createdAt, key)
sum += int64(cid[7])
}
_ = sum
})
}
// BenchmarkConnectionIDGenerator_Generate measures Generate with generators
// recycled through a sync.Pool, the recommended production usage.
func BenchmarkConnectionIDGenerator_Generate(b *testing.B) {
ip := net.ParseIP("127.0.0.1")
key := "some random string that is hopefully at least this long"
createdAt := time.Now()
pool := &sync.Pool{
New: func() interface{} {
return NewConnectionIDGenerator(key)
},
}
b.RunParallel(func(pb *testing.PB) {
sum := int64(0)
for pb.Next() {
gen := pool.Get().(*ConnectionIDGenerator)
cid := gen.Generate(ip, createdAt)
sum += int64(cid[7])
pool.Put(gen)
}
})
}
// BenchmarkValidConnectionID measures the convenience validation wrapper,
// which allocates a fresh generator per call.
func BenchmarkValidConnectionID(b *testing.B) {
ip := net.ParseIP("127.0.0.1")
key := "some random string that is hopefully at least this long"
createdAt := time.Now()
cid := NewConnectionID(ip, createdAt, key)
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
if !ValidConnectionID(cid, ip, createdAt, 10*time.Second, key) {
b.FailNow()
}
}
})
}
// BenchmarkConnectionIDGenerator_Validate measures Validate with generators
// recycled through a sync.Pool, the recommended production usage.
func BenchmarkConnectionIDGenerator_Validate(b *testing.B) {
ip := net.ParseIP("127.0.0.1")
key := "some random string that is hopefully at least this long"
createdAt := time.Now()
cid := NewConnectionID(ip, createdAt, key)
pool := &sync.Pool{
New: func() interface{} {
return NewConnectionIDGenerator(key)
},
}
b.RunParallel(func(pb *testing.PB) {
for pb.Next() {
gen := pool.Get().(*ConnectionIDGenerator)
if !gen.Validate(cid, ip, createdAt, 10*time.Second) {
b.FailNow()
}
pool.Put(gen)
}
})
}

View file

@ -1,363 +0,0 @@
// Package udp implements a BitTorrent tracker via the UDP protocol as
// described in BEP 15.
package udp
import (
	"bytes"
	"context"
	crand "crypto/rand"
	"encoding/binary"
	"errors"
	"fmt"
	"math/rand"
	"net"
	"sync"
	"time"

	"github.com/chihaya/chihaya/bittorrent"
	"github.com/chihaya/chihaya/frontend"
	"github.com/chihaya/chihaya/frontend/udp/bytepool"
	"github.com/chihaya/chihaya/pkg/log"
	"github.com/chihaya/chihaya/pkg/stop"
	"github.com/chihaya/chihaya/pkg/timecache"
)
var allowedGeneratedPrivateKeyRunes = []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890")
// Config represents all of the configurable options for a UDP BitTorrent
// Tracker.
type Config struct {
// Addr is the address the UDP socket binds to.
Addr string `yaml:"addr"`
// PrivateKey keys the HMAC used for connection IDs; generated if empty.
PrivateKey string `yaml:"private_key"`
// MaxClockSkew bounds how far in the future a connection ID may claim
// to have been issued and still validate.
MaxClockSkew time.Duration `yaml:"max_clock_skew"`
// EnableRequestTiming toggles per-request latency measurement.
EnableRequestTiming bool `yaml:"enable_request_timing"`
ParseOptions `yaml:",inline"`
}
// LogFields renders the current config as a set of Logrus fields.
// NOTE(review): this includes the HMAC private key in the log output;
// consider redacting it, since anyone with the key can forge connection IDs.
func (cfg Config) LogFields() log.Fields {
return log.Fields{
"addr": cfg.Addr,
"privateKey": cfg.PrivateKey,
"maxClockSkew": cfg.MaxClockSkew,
"enableRequestTiming": cfg.EnableRequestTiming,
"allowIPSpoofing": cfg.AllowIPSpoofing,
"maxNumWant": cfg.MaxNumWant,
"defaultNumWant": cfg.DefaultNumWant,
"maxScrapeInfoHashes": cfg.MaxScrapeInfoHashes,
}
}
// Validate sanity checks values set in a config and returns a new config with
// default values replacing anything that is invalid.
//
// This function warns to the logger when a value is changed.
func (cfg Config) Validate() Config {
	validcfg := cfg

	// Generate a private key if one isn't provided by the user.
	if cfg.PrivateKey == "" {
		validcfg.PrivateKey = generatePrivateKey()
		log.Warn("UDP private key was not provided, using generated key", log.Fields{"key": validcfg.PrivateKey})
	}

	if cfg.MaxNumWant <= 0 {
		validcfg.MaxNumWant = defaultMaxNumWant
		log.Warn("falling back to default configuration", log.Fields{
			"name":     "udp.MaxNumWant",
			"provided": cfg.MaxNumWant,
			"default":  validcfg.MaxNumWant,
		})
	}

	if cfg.DefaultNumWant <= 0 {
		validcfg.DefaultNumWant = defaultDefaultNumWant
		log.Warn("falling back to default configuration", log.Fields{
			"name":     "udp.DefaultNumWant",
			"provided": cfg.DefaultNumWant,
			"default":  validcfg.DefaultNumWant,
		})
	}

	if cfg.MaxScrapeInfoHashes <= 0 {
		validcfg.MaxScrapeInfoHashes = defaultMaxScrapeInfoHashes
		log.Warn("falling back to default configuration", log.Fields{
			"name":     "udp.MaxScrapeInfoHashes",
			"provided": cfg.MaxScrapeInfoHashes,
			"default":  validcfg.MaxScrapeInfoHashes,
		})
	}
	return validcfg
}

// generatePrivateKey produces a 64-character key over the allowed rune set
// for use as the connection-ID HMAC key.
//
// It draws from crypto/rand so the key is unpredictable; the previous
// time-seeded math/rand key was guessable, letting attackers forge
// connection IDs. Only if the system entropy source fails does it fall back
// to the old math/rand behavior rather than aborting startup.
func generatePrivateKey() string {
	pkeyRunes := make([]rune, 64)
	raw := make([]byte, len(pkeyRunes))
	if _, err := crand.Read(raw); err == nil {
		for i, b := range raw {
			// Modulo bias over 62 symbols is negligible for this purpose.
			pkeyRunes[i] = allowedGeneratedPrivateKeyRunes[int(b)%len(allowedGeneratedPrivateKeyRunes)]
		}
	} else {
		rand.Seed(time.Now().UnixNano())
		for i := range pkeyRunes {
			pkeyRunes[i] = allowedGeneratedPrivateKeyRunes[rand.Intn(len(allowedGeneratedPrivateKeyRunes))]
		}
	}
	return string(pkeyRunes)
}
// Frontend holds the state of a UDP BitTorrent Frontend.
type Frontend struct {
// socket is the bound UDP listener (see listen).
socket *net.UDPConn
// closing is closed by Stop() to signal the serve loop to exit.
closing chan struct{}
// wg tracks the serve loop and all in-flight request handlers.
wg sync.WaitGroup
// genPool recycles ConnectionIDGenerators, which are not thread safe.
genPool *sync.Pool
logic frontend.TrackerLogic
Config
}
// NewFrontend creates a new instance of an UDP Frontend that asynchronously
// serves requests.
func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error) {
cfg := provided.Validate()
f := &Frontend{
closing: make(chan struct{}),
logic: logic,
Config: cfg,
// Generators are pooled because they are cheap to reuse but not safe
// for concurrent use by multiple goroutines.
genPool: &sync.Pool{
New: func() interface{} {
return NewConnectionIDGenerator(cfg.PrivateKey)
},
},
}
if err := f.listen(); err != nil {
return nil, err
}
// serve runs in the background for the lifetime of the frontend; any
// serve error other than a clean shutdown aborts the whole process.
go func() {
if err := f.serve(); err != nil {
log.Fatal("failed while serving udp", log.Err(err))
}
}()
return f, nil
}
// Stop provides a thread-safe way to shutdown a currently running Frontend.
func (t *Frontend) Stop() stop.Result {
// If closing is already closed, a previous Stop() has run.
select {
case <-t.closing:
return stop.AlreadyStopped
default:
}
c := make(stop.Channel)
go func() {
close(t.closing)
// Setting an already-passed read deadline wakes any goroutine blocked
// in ReadFromUDP so serve() can observe the closed channel. The socket
// itself is closed only after all in-flight handlers have finished.
_ = t.socket.SetReadDeadline(time.Now())
t.wg.Wait()
c.Done(t.socket.Close())
}()
return c.Result()
}
// listen resolves the configured address and binds the UDP server socket.
func (t *Frontend) listen() error {
	resolved, err := net.ResolveUDPAddr("udp", t.Addr)
	if err != nil {
		return err
	}

	socket, err := net.ListenUDP("udp", resolved)
	if err != nil {
		return err
	}
	t.socket = socket
	return nil
}
// serve blocks while listening and serving UDP BitTorrent requests
// until Stop() is called or an error is returned.
//
// Each datagram is handled on its own goroutine; packet buffers are recycled
// through a bytepool and returned once the handler finishes.
func (t *Frontend) serve() error {
	pool := bytepool.New(2048)

	t.wg.Add(1)
	defer t.wg.Done()

	for {
		// Check to see if we need to shutdown.
		select {
		case <-t.closing:
			log.Debug("udp serve() received shutdown signal")
			return nil
		default:
		}

		// Read a UDP packet into a reusable buffer.
		buffer := pool.Get()
		n, addr, err := t.socket.ReadFromUDP(*buffer)
		if err != nil {
			pool.Put(buffer)

			// BUG FIX: the original `if errors.As(err, &netErr); netErr.Temporary()`
			// called Temporary() even when errors.As failed, panicking on a nil
			// interface for any non-net.Error. Only treat the error as benign
			// when it really is a temporary net.Error (this also covers the
			// deadline set by Stop(); the select above then exits cleanly).
			var netErr net.Error
			if errors.As(err, &netErr) && netErr.Temporary() {
				// A temporary failure is not fatal; just pretend it never happened.
				continue
			}
			return err
		}

		// We got nothin'
		if n == 0 {
			pool.Put(buffer)
			continue
		}

		t.wg.Add(1)
		go func() {
			defer t.wg.Done()
			defer pool.Put(buffer)

			// Normalize IPv4-mapped addresses to 4-byte form.
			if ip := addr.IP.To4(); ip != nil {
				addr.IP = ip
			}

			// Handle the request.
			var start time.Time
			if t.EnableRequestTiming {
				start = time.Now()
			}
			action, af, err := t.handleRequest(
				// Make sure the IP is copied, not referenced.
				Request{(*buffer)[:n], append([]byte{}, addr.IP...)},
				ResponseWriter{t.socket, addr},
			)
			if t.EnableRequestTiming {
				recordResponseDuration(action, af, err, time.Since(start))
			} else {
				recordResponseDuration(action, af, err, time.Duration(0))
			}
		}()
	}
}
// Request represents a UDP payload received by a Tracker.
type Request struct {
// Packet is the raw datagram contents (header and body).
Packet []byte
// IP is the packet's source address, copied so it outlives the socket read.
IP net.IP
}
// ResponseWriter implements the ability to respond to a Request via the
// io.Writer interface.
type ResponseWriter struct {
socket *net.UDPConn
addr *net.UDPAddr
}
// Write implements the io.Writer interface for a ResponseWriter.
// The socket's result is deliberately discarded: UDP responses are
// best-effort, and a send failure must not fail the request handler.
func (w ResponseWriter) Write(b []byte) (int, error) {
_, _ = w.socket.WriteToUDP(b, w.addr)
return len(b), nil
}
// handleRequest parses and responds to a UDP Request.
// It returns the action name and address family for metrics, plus any error
// that occurred; protocol errors are also written back to the client, except
// for malformed packets, which are dropped silently.
func (t *Frontend) handleRequest(r Request, w ResponseWriter) (actionName string, af *bittorrent.AddressFamily, err error) {
if len(r.Packet) < 16 {
// Malformed, no client packets are less than 16 bytes.
// We explicitly return nothing in case this is a DoS attempt.
err = errMalformedPacket
return
}
// Parse the headers of the UDP packet.
// BEP 15 layout: 8-byte connection ID, 4-byte action, 4-byte transaction ID.
connID := r.Packet[0:8]
actionID := binary.BigEndian.Uint32(r.Packet[8:12])
txID := r.Packet[12:16]
// get a connection ID generator/validator from the pool.
gen := t.genPool.Get().(*ConnectionIDGenerator)
defer t.genPool.Put(gen)
// If this isn't requesting a new connection ID and the connection ID is
// invalid, then fail.
if actionID != connectActionID && !gen.Validate(connID, r.IP, timecache.Now(), t.MaxClockSkew) {
err = errBadConnectionID
WriteError(w, txID, err)
return
}
// Handle the requested action.
switch actionID {
case connectActionID:
actionName = "connect"
// Connect requests must carry the magic initial connection ID from BEP 15.
if !bytes.Equal(connID, initialConnectionID) {
err = errMalformedPacket
return
}
af = new(bittorrent.AddressFamily)
if r.IP.To4() != nil {
*af = bittorrent.IPv4
} else if len(r.IP) == net.IPv6len { // implies r.IP.To4() == nil
*af = bittorrent.IPv6
} else {
// Should never happen - we got the IP straight from the UDP packet.
panic(fmt.Sprintf("udp: invalid IP: neither v4 nor v6, IP: %#v", r.IP))
}
WriteConnectionID(w, txID, gen.Generate(r.IP, timecache.Now()))
case announceActionID, announceV6ActionID:
actionName = "announce"
var req *bittorrent.AnnounceRequest
req, err = ParseAnnounce(r, actionID == announceV6ActionID, t.ParseOptions)
if err != nil {
WriteError(w, txID, err)
return
}
af = new(bittorrent.AddressFamily)
*af = req.IP.AddressFamily
var ctx context.Context
var resp *bittorrent.AnnounceResponse
ctx, resp, err = t.logic.HandleAnnounce(context.Background(), req)
if err != nil {
WriteError(w, txID, err)
return
}
WriteAnnounce(w, txID, resp, actionID == announceV6ActionID, req.IP.AddressFamily == bittorrent.IPv6)
// Post-processing hooks must not block the response.
go t.logic.AfterAnnounce(ctx, req, resp)
case scrapeActionID:
actionName = "scrape"
var req *bittorrent.ScrapeRequest
req, err = ParseScrape(r, t.ParseOptions)
if err != nil {
WriteError(w, txID, err)
return
}
// Classify the source address family from the packet's source IP.
if r.IP.To4() != nil {
req.AddressFamily = bittorrent.IPv4
} else if len(r.IP) == net.IPv6len { // implies r.IP.To4() == nil
req.AddressFamily = bittorrent.IPv6
} else {
// Should never happen - we got the IP straight from the UDP packet.
panic(fmt.Sprintf("udp: invalid IP: neither v4 nor v6, IP: %#v", r.IP))
}
af = new(bittorrent.AddressFamily)
*af = req.AddressFamily
var ctx context.Context
var resp *bittorrent.ScrapeResponse
ctx, resp, err = t.logic.HandleScrape(context.Background(), req)
if err != nil {
WriteError(w, txID, err)
return
}
WriteScrape(w, txID, resp)
// Post-processing hooks must not block the response.
go t.logic.AfterScrape(ctx, req, resp)
default:
err = errUnknownAction
WriteError(w, txID, err)
}
return
}

View file

@ -1,28 +0,0 @@
package udp_test
import (
"testing"
"github.com/chihaya/chihaya/frontend/udp"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/storage"
_ "github.com/chihaya/chihaya/storage/memory"
)
// TestStartStopRaceIssue437 is a regression test: stopping a UDP frontend
// immediately after creating it must shut down cleanly without racing the
// background serve goroutine (issue #437).
func TestStartStopRaceIssue437(t *testing.T) {
ps, err := storage.NewPeerStore("memory", nil)
if err != nil {
t.Fatal(err)
}
var responseConfig middleware.ResponseConfig
lgc := middleware.NewLogic(responseConfig, ps, nil, nil)
// Port 0 lets the OS pick a free port so the test never collides.
fe, err := udp.NewFrontend(lgc, udp.Config{Addr: "127.0.0.1:0"})
if err != nil {
t.Fatal(err)
}
errC := fe.Stop()
errs := <-errC
if len(errs) != 0 {
t.Fatal(errs[0])
}
}

View file

@ -1,229 +0,0 @@
package udp
import (
"bytes"
"encoding/binary"
"fmt"
"net"
"sync"
"github.com/chihaya/chihaya/bittorrent"
)
const (
connectActionID uint32 = iota
announceActionID
scrapeActionID
errorActionID
// action == 4 is the "old" IPv6 action used by opentracker, with a packet
// format specified at
// https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
announceV6ActionID
)
// Option-Types as described in BEP 41 and BEP 45.
const (
optionEndOfOptions byte = 0x0
optionNOP byte = 0x1
optionURLData byte = 0x2
)
var (
// initialConnectionID is the magic initial connection ID specified by BEP 15.
initialConnectionID = []byte{0, 0, 0x04, 0x17, 0x27, 0x10, 0x19, 0x80}
// eventIDs map values described in BEP 15 to Events.
eventIDs = []bittorrent.Event{
bittorrent.None,
bittorrent.Completed,
bittorrent.Started,
bittorrent.Stopped,
}
errMalformedPacket = bittorrent.ClientError("malformed packet")
errMalformedIP = bittorrent.ClientError("malformed IP address")
errMalformedEvent = bittorrent.ClientError("malformed event ID")
errUnknownAction = bittorrent.ClientError("unknown action ID")
errBadConnectionID = bittorrent.ClientError("bad connection ID")
errUnknownOptionType = bittorrent.ClientError("unknown option type")
)
// ParseOptions is the configuration used to parse an Announce Request.
//
// If AllowIPSpoofing is true, IPs provided via params will be used.
type ParseOptions struct {
AllowIPSpoofing bool `yaml:"allow_ip_spoofing"`
// MaxNumWant caps how many peers a client may request in one announce.
MaxNumWant uint32 `yaml:"max_numwant"`
// DefaultNumWant is the peer count used when a client omits numwant.
DefaultNumWant uint32 `yaml:"default_numwant"`
// MaxScrapeInfoHashes caps how many infohashes one scrape may query.
MaxScrapeInfoHashes uint32 `yaml:"max_scrape_infohashes"`
}
// Default parser config constants.
// NOTE(review): presumably applied when the corresponding ParseOptions field
// is left zero — confirm against the config-loading code.
const (
	defaultMaxNumWant          = 100
	defaultDefaultNumWant      = 50
	defaultMaxScrapeInfoHashes = 50
)
// ParseAnnounce parses an AnnounceRequest from a UDP request.
//
// If v6Action is true, the announce is parsed the
// "old opentracker way":
// https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
func ParseAnnounce(r Request, v6Action bool, opts ParseOptions) (*bittorrent.AnnounceRequest, error) {
	// The packet layout is fixed up to the IP field; only the IP width varies.
	ipEnd := 84 + net.IPv4len
	if v6Action {
		ipEnd = 84 + net.IPv6len
	}

	// 10 = 4 (key) + 4 (numwant) + 2 (port) trailing bytes after the IP.
	if len(r.Packet) < ipEnd+10 {
		return nil, errMalformedPacket
	}

	infohash := r.Packet[16:36]
	peerID := r.Packet[36:56]
	downloaded := binary.BigEndian.Uint64(r.Packet[56:64])
	left := binary.BigEndian.Uint64(r.Packet[64:72])
	uploaded := binary.BigEndian.Uint64(r.Packet[72:80])

	eventID := int(r.Packet[83])
	if eventID >= len(eventIDs) {
		return nil, errMalformedEvent
	}

	ip := r.IP
	ipProvided := false
	ipbytes := r.Packet[84:ipEnd]
	if opts.AllowIPSpoofing {
		// Copy the packet-provided address into a freshly allocated slice.
		// The previous in-place copy(ip, ...) silently dropped the spoofed
		// address when r.IP was nil and corrupted it when the lengths
		// differed (e.g. a 16-byte r.IP overlaid with a 4-byte address).
		ip = make(net.IP, len(ipbytes))
		copy(ip, ipbytes)
		ipProvided = true
	}
	if !opts.AllowIPSpoofing && r.IP == nil {
		// We have no IP address to fallback on.
		return nil, errMalformedIP
	}

	numWant := binary.BigEndian.Uint32(r.Packet[ipEnd+4 : ipEnd+8])
	port := binary.BigEndian.Uint16(r.Packet[ipEnd+8 : ipEnd+10])

	// Everything after the fixed fields is BEP 41 optional parameters.
	params, err := handleOptionalParameters(r.Packet[ipEnd+10:])
	if err != nil {
		return nil, err
	}

	request := &bittorrent.AnnounceRequest{
		Event:           eventIDs[eventID],
		InfoHash:        bittorrent.InfoHashFromBytes(infohash),
		NumWant:         numWant,
		Left:            left,
		Downloaded:      downloaded,
		Uploaded:        uploaded,
		IPProvided:      ipProvided,
		NumWantProvided: true,
		EventProvided:   true,
		Peer: bittorrent.Peer{
			ID:   bittorrent.PeerIDFromBytes(peerID),
			IP:   bittorrent.IP{IP: ip},
			Port: port,
		},
		Params: params,
	}

	// Clamp numwant and apply configured defaults.
	if err := bittorrent.SanitizeAnnounce(request, opts.MaxNumWant, opts.DefaultNumWant); err != nil {
		return nil, err
	}
	return request, nil
}
// buffer embeds bytes.Buffer so pool-management helpers can hang off it.
type buffer struct {
	bytes.Buffer
}

// bufferFree pools buffers to avoid allocating a new one per packet.
var bufferFree = sync.Pool{
	New: func() interface{} { return new(buffer) },
}

// newBuffer fetches a (possibly recycled) buffer from the pool.
func newBuffer() *buffer {
	return bufferFree.Get().(*buffer)
}

// free resets the buffer and returns it to the pool; the caller must not use
// it afterwards.
func (b *buffer) free() {
	b.Reset()
	bufferFree.Put(b)
}
// handleOptionalParameters parses the optional parameters as described in BEP
// 41 and updates an announce with the values parsed.
func handleOptionalParameters(packet []byte) (bittorrent.Params, error) {
	if len(packet) == 0 {
		return bittorrent.ParseURLData("")
	}

	urlData := newBuffer()
	defer urlData.free()

	pos := 0
	for pos < len(packet) {
		switch opt := packet[pos]; opt {
		case optionEndOfOptions:
			// Explicit terminator: parse whatever has accumulated so far.
			return bittorrent.ParseURLData(urlData.String())

		case optionNOP:
			// Padding byte; skip it.
			pos++

		case optionURLData:
			// One length byte follows, then that many bytes of URL data.
			if pos+1 >= len(packet) {
				return nil, errMalformedPacket
			}
			size := int(packet[pos+1])
			if pos+2+size > len(packet) {
				return nil, errMalformedPacket
			}

			written, err := urlData.Write(packet[pos+2 : pos+2+size])
			if err != nil {
				return nil, err
			}
			if written != size {
				return nil, fmt.Errorf("expected to write %d bytes, wrote %d", size, written)
			}
			pos += 2 + size

		default:
			return nil, errUnknownOptionType
		}
	}

	// Ran off the end without an explicit end-of-options marker.
	return bittorrent.ParseURLData(urlData.String())
}
// ParseScrape parses a ScrapeRequest from a UDP request.
func ParseScrape(r Request, opts ParseOptions) (*bittorrent.ScrapeRequest, error) {
	// A scrape carries a 16-byte header plus at least one 20-byte infohash.
	const headerLen, infohashLen = 16, 20
	if len(r.Packet) < headerLen+infohashLen {
		return nil, errMalformedPacket
	}

	// Everything after the header must be a whole number of infohashes.
	body := r.Packet[headerLen:]
	if len(body)%infohashLen != 0 {
		return nil, errMalformedPacket
	}

	// Collect each 20-byte infohash in order.
	infohashes := make([]bittorrent.InfoHash, 0, len(body)/infohashLen)
	for off := 0; off+infohashLen <= len(body); off += infohashLen {
		infohashes = append(infohashes, bittorrent.InfoHashFromBytes(body[off:off+infohashLen]))
	}

	// Sanitize the request.
	request := &bittorrent.ScrapeRequest{InfoHashes: infohashes}
	if err := bittorrent.SanitizeScrape(request, opts.MaxScrapeInfoHashes); err != nil {
		return nil, err
	}
	return request, nil
}

View file

@ -1,77 +0,0 @@
package udp
import (
"errors"
"fmt"
"testing"
)
// table enumerates BEP 41 option streams and the URL-data key/values (or
// error) that handleOptionalParameters should produce for each.
var table = []struct {
	data   []byte
	values map[string]string
	err    error
}{
	{
		// Single URL-data chunk containing one query parameter.
		[]byte{0x2, 0x5, '/', '?', 'a', '=', 'b'},
		map[string]string{"a": "b"},
		nil,
	},
	{
		// Zero-length URL-data chunk parses to no parameters.
		[]byte{0x2, 0x0},
		map[string]string{},
		nil,
	},
	{
		// Declared length exceeds the remaining bytes.
		[]byte{0x2, 0x1},
		nil,
		errMalformedPacket,
	},
	{
		// URL-data option missing its mandatory length byte.
		[]byte{0x2},
		nil,
		errMalformedPacket,
	},
	{
		// Path segments before the query string are tolerated.
		[]byte{0x2, 0x8, '/', 'c', '/', 'd', '?', 'a', '=', 'b'},
		map[string]string{"a": "b"},
		nil,
	},
	{
		// URL data split across two consecutive chunks is concatenated.
		[]byte{0x2, 0x2, '/', '?', 0x2, 0x3, 'a', '=', 'b'},
		map[string]string{"a": "b"},
		nil,
	},
	{
		// Percent-encoded bytes are decoded ("%20" -> space).
		[]byte{0x2, 0x9, '/', '?', 'a', '=', 'b', '%', '2', '0', 'c'},
		map[string]string{"a": "b c"},
		nil,
	},
}
// TestHandleOptionalParameters runs every table case through
// handleOptionalParameters and checks both the returned error and the decoded
// key/value pairs.
func TestHandleOptionalParameters(t *testing.T) {
	for _, tt := range table {
		t.Run(fmt.Sprintf("%#v as %#v", tt.data, tt.values), func(t *testing.T) {
			params, err := handleOptionalParameters(tt.data)
			// Error mismatch: report differently depending on which side
			// expected success.
			if !errors.Is(err, tt.err) {
				if tt.err == nil {
					t.Fatalf("expected no parsing error for %x but got %s", tt.data, err)
				} else {
					t.Fatalf("expected parsing error for %x", tt.data)
				}
			}
			// Only compare values when the case expects successful parsing.
			if tt.values != nil {
				if params == nil {
					t.Fatalf("expected values %v for %x", tt.values, tt.data)
				} else {
					for key, want := range tt.values {
						if got, ok := params.String(key); !ok {
							t.Fatalf("params missing entry %s for data %x", key, tt.data)
						} else if got != want {
							t.Fatalf("expected param %s=%s, but was %s for data %x", key, want, got, tt.data)
						}
					}
				}
			}
		})
	}
}

View file

@ -1,50 +0,0 @@
package udp
import (
"errors"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/chihaya/chihaya/bittorrent"
)
// init registers the histogram with the default Prometheus registry at
// package load time; duplicate registration panics.
func init() {
	prometheus.MustRegister(promResponseDurationMilliseconds)
}

// promResponseDurationMilliseconds measures UDP response latency in
// milliseconds, labeled by action, address family, and error string.
var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name:    "chihaya_udp_response_duration_milliseconds",
		Help:    "The duration of time it takes to receive and write a response to an API request",
		Buckets: prometheus.ExponentialBuckets(9.375, 2, 10),
	},
	[]string{"action", "address_family", "error"},
)
// recordResponseDuration records the duration of time to respond to a UDP
// Request in milliseconds.
func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
	// Client errors are recorded verbatim; everything else collapses into a
	// generic label so metric cardinality stays bounded. A nil error leaves
	// the label empty.
	errLabel := ""
	if err != nil {
		var clientErr bittorrent.ClientError
		if errors.As(err, &clientErr) {
			errLabel = clientErr.Error()
		} else {
			errLabel = "internal error"
		}
	}

	// Translate the address family into its label; nil means unknown, and an
	// unrecognized non-nil value yields an empty label (as before).
	afLabel := ""
	switch {
	case af == nil:
		afLabel = "Unknown"
	case *af == bittorrent.IPv4:
		afLabel = "IPv4"
	case *af == bittorrent.IPv6:
		afLabel = "IPv6"
	}

	promResponseDurationMilliseconds.
		WithLabelValues(action, afLabel, errLabel).
		Observe(float64(duration.Nanoseconds()) / float64(time.Millisecond))
}

View file

@ -1,92 +0,0 @@
package udp
import (
"encoding/binary"
"errors"
"fmt"
"io"
"time"
"github.com/chihaya/chihaya/bittorrent"
)
// WriteError writes the failure reason as a null-terminated string.
func WriteError(w io.Writer, txID []byte, err error) {
	// Only client-caused failures are reported verbatim; everything else is
	// wrapped so internal details are clearly marked as such.
	var clientErr bittorrent.ClientError
	if !errors.As(err, &clientErr) {
		err = fmt.Errorf("internal error occurred: %w", err)
	}

	b := newBuffer()
	defer b.free()

	writeHeader(b, txID, errorActionID)
	b.WriteString(err.Error())
	b.WriteRune('\000')
	_, _ = w.Write(b.Bytes())
}
// WriteAnnounce encodes an announce response according to BEP 15.
// The peers returned will be resp.IPv6Peers or resp.IPv4Peers, depending on
// whether v6Peers is set.
// If v6Action is set, the action will be 4, according to
// https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/
func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse, v6Action, v6Peers bool) {
	b := newBuffer()
	defer b.free()

	// Select the header action: 4 ("old" opentracker IPv6) or the standard
	// announce action.
	action := announceActionID
	if v6Action {
		action = announceV6ActionID
	}
	writeHeader(b, txID, action)

	// Fixed-size response fields.
	_ = binary.Write(b, binary.BigEndian, uint32(resp.Interval/time.Second))
	_ = binary.Write(b, binary.BigEndian, resp.Incomplete)
	_ = binary.Write(b, binary.BigEndian, resp.Complete)

	// Peer list: raw IP bytes followed by a big-endian port, per peer.
	peers := resp.IPv4Peers
	if v6Peers {
		peers = resp.IPv6Peers
	}
	for _, peer := range peers {
		b.Write(peer.IP.IP)
		_ = binary.Write(b, binary.BigEndian, peer.Port)
	}

	_, _ = w.Write(b.Bytes())
}
// WriteScrape encodes a scrape response according to BEP 15.
func WriteScrape(w io.Writer, txID []byte, resp *bittorrent.ScrapeResponse) {
	b := newBuffer()
	defer b.free()

	writeHeader(b, txID, scrapeActionID)

	// Each file contributes three big-endian uint32 counters in wire order:
	// complete (seeders), snatches (downloads), incomplete (leechers).
	for _, scrape := range resp.Files {
		_ = binary.Write(b, binary.BigEndian, scrape.Complete)
		_ = binary.Write(b, binary.BigEndian, scrape.Snatches)
		_ = binary.Write(b, binary.BigEndian, scrape.Incomplete)
	}

	_, _ = w.Write(b.Bytes())
}
// WriteConnectionID encodes a new connection response according to BEP 15.
func WriteConnectionID(w io.Writer, txID, connID []byte) {
	b := newBuffer()
	defer b.free()

	writeHeader(b, txID, connectActionID)
	b.Write(connID)
	_, _ = w.Write(b.Bytes())
}
// writeHeader writes the action and transaction ID to the provided response
// buffer.
//
// Errors are deliberately discarded: every caller in this package passes an
// in-memory *buffer, whose writes do not fail.
func writeHeader(w io.Writer, txID []byte, action uint32) {
	_ = binary.Write(w, binary.BigEndian, action)
	_, _ = w.Write(txID)
}

28
go.mod
View file

@ -1,28 +0,0 @@
module github.com/chihaya/chihaya
go 1.16
require (
github.com/SermoDigital/jose v0.9.2-0.20180104203859-803625baeddc
github.com/alicebob/gopher-json v0.0.0-20180125190556-5a6b3ba71ee6 // indirect
github.com/alicebob/miniredis v2.5.0+incompatible
github.com/anacrolix/dht/v2 v2.15.1 // indirect
github.com/anacrolix/missinggo/v2 v2.5.3 // indirect
github.com/anacrolix/torrent v1.40.0
github.com/go-redsync/redsync/v4 v4.5.0
github.com/gomodule/redigo v1.8.8
github.com/julienschmidt/httprouter v1.3.0
github.com/klauspost/cpuid/v2 v2.0.9 // indirect
github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103
github.com/minio/sha256-simd v1.0.0
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/sirupsen/logrus v1.8.1
github.com/spf13/cobra v1.3.0
github.com/stretchr/testify v1.7.0
github.com/yuin/gopher-lua v0.0.0-20191220021717-ab39c6098bdb // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
gopkg.in/yaml.v2 v2.4.0
)

1625
go.sum

File diff suppressed because it is too large Load diff

299
http/announce_test.go Normal file
View file

@ -0,0 +1,299 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"net/http/httptest"
"reflect"
"strconv"
"testing"
"time"
"github.com/chihaya/bencode"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/tracker"
"github.com/chihaya/chihaya/tracker/models"
)
// TestPublicAnnounce announces three peers against a fresh tracker and checks
// the peer lists returned as seeders/leechers join and leave.
func TestPublicAnnounce(t *testing.T) {
	srv, err := setupTracker(nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer srv.Close()

	// Two seeders and one leecher on the shared test infohash.
	peer1 := makePeerParams("peer1", true)
	peer2 := makePeerParams("peer2", true)
	peer3 := makePeerParams("peer3", false)

	peer1["event"] = "started"
	expected := makeResponse(1, 0, peer1)
	checkAnnounce(peer1, expected, srv, t)

	expected = makeResponse(2, 0, peer2)
	checkAnnounce(peer2, expected, srv, t)

	expected = makeResponse(2, 1, peer1, peer2)
	checkAnnounce(peer3, expected, srv, t)

	// peer1 stops; it must disappear from subsequent responses.
	peer1["event"] = "stopped"
	expected = makeResponse(1, 1, nil)
	checkAnnounce(peer1, expected, srv, t)

	expected = makeResponse(1, 1, peer2)
	checkAnnounce(peer3, expected, srv, t)
}
// TestTorrentPurging checks that a torrent is removed from the tracker once
// its last peer sends a "stopped" event.
func TestTorrentPurging(t *testing.T) {
	tkr, err := tracker.New(&config.DefaultConfig)
	if err != nil {
		t.Fatalf("failed to create new tracker instance: %s", err)
	}
	srv, err := setupTracker(nil, tkr)
	if err != nil {
		t.Fatal(err)
	}
	defer srv.Close()

	// Add one seeder.
	peer := makePeerParams("peer1", true)
	announce(peer, srv)

	// Make sure the torrent was created.
	_, err = tkr.FindTorrent(infoHash)
	if err != nil {
		t.Fatalf("expected torrent to exist after announce: %s", err)
	}

	// Remove seeder.
	peer = makePeerParams("peer1", true)
	peer["event"] = "stopped"
	announce(peer, srv)

	// With no peers left, the torrent should be gone.
	_, err = tkr.FindTorrent(infoHash)
	if err != models.ErrTorrentDNE {
		t.Fatalf("expected torrent to have been purged: %s", err)
	}
}
// TestStalePeerPurging configures very short announce/reap intervals and
// verifies that idle peers (and thus the torrent) expire.
func TestStalePeerPurging(t *testing.T) {
	cfg := config.DefaultConfig
	cfg.MinAnnounce = config.Duration{10 * time.Millisecond}
	cfg.ReapInterval = config.Duration{10 * time.Millisecond}
	tkr, err := tracker.New(&cfg)
	if err != nil {
		t.Fatalf("failed to create new tracker instance: %s", err)
	}
	srv, err := setupTracker(&cfg, tkr)
	if err != nil {
		t.Fatal(err)
	}
	defer srv.Close()

	// Add one seeder.
	peer1 := makePeerParams("peer1", true)
	announce(peer1, srv)

	// Make sure the torrent was created.
	_, err = tkr.FindTorrent(infoHash)
	if err != nil {
		t.Fatalf("expected torrent to exist after announce: %s", err)
	}

	// Add a leecher.
	peer2 := makePeerParams("peer2", false)
	expected := makeResponse(1, 1, peer1)
	expected["min interval"] = int64(0)
	checkAnnounce(peer2, expected, srv, t)

	// Let them both expire.
	// 30ms comfortably exceeds the 10ms announce/reap intervals above.
	time.Sleep(30 * time.Millisecond)
	_, err = tkr.FindTorrent(infoHash)
	if err != models.ErrTorrentDNE {
		t.Fatalf("expected torrent to have been purged: %s", err)
	}
}
// TestPreferredSubnet exercises subnet-preference matching: peers inside the
// announcer's configured IPv4 (/8) or IPv6 (/16) subnet are preferred in
// responses.
func TestPreferredSubnet(t *testing.T) {
	cfg := config.DefaultConfig
	cfg.PreferredSubnet = true
	cfg.PreferredIPv4Subnet = 8
	cfg.PreferredIPv6Subnet = 16
	cfg.DualStackedPeers = false
	srv, err := setupTracker(&cfg, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer srv.Close()

	// Peers grouped by subnet: A (44.0.0.0/8), B (45.0.0.0/8),
	// C (fc01::/16), D (fc02::/16).
	peerA1 := makePeerParams("peerA1", false, "44.0.0.1")
	peerA2 := makePeerParams("peerA2", false, "44.0.0.2")
	peerA3 := makePeerParams("peerA3", false, "44.0.0.3")
	peerA4 := makePeerParams("peerA4", false, "44.0.0.4")
	peerB1 := makePeerParams("peerB1", false, "45.0.0.1")
	peerB2 := makePeerParams("peerB2", false, "45.0.0.2")
	peerC1 := makePeerParams("peerC1", false, "fc01::1")
	peerC2 := makePeerParams("peerC2", false, "fc01::2")
	peerC3 := makePeerParams("peerC3", false, "fc01::3")
	peerD1 := makePeerParams("peerD1", false, "fc02::1")
	peerD2 := makePeerParams("peerD2", false, "fc02::2")

	expected := makeResponse(0, 1, peerA1)
	checkAnnounce(peerA1, expected, srv, t)

	expected = makeResponse(0, 2, peerA1)
	checkAnnounce(peerA2, expected, srv, t)

	expected = makeResponse(0, 3, peerA1, peerA2)
	checkAnnounce(peerB1, expected, srv, t)

	// With a small numwant, same-subnet peers are chosen first; each check is
	// repeated to confirm the selection is stable.
	peerB2["numwant"] = "1"
	expected = makeResponse(0, 4, peerB1)
	checkAnnounce(peerB2, expected, srv, t)
	checkAnnounce(peerB2, expected, srv, t)

	peerA3["numwant"] = "2"
	expected = makeResponse(0, 5, peerA1, peerA2)
	checkAnnounce(peerA3, expected, srv, t)
	checkAnnounce(peerA3, expected, srv, t)

	peerA4["numwant"] = "3"
	expected = makeResponse(0, 6, peerA1, peerA2, peerA3)
	checkAnnounce(peerA4, expected, srv, t)
	checkAnnounce(peerA4, expected, srv, t)

	// First IPv6 announce sees every peer seen so far.
	expected = makeResponse(0, 7, peerA1, peerA2, peerA3, peerA4, peerB1, peerB2)
	checkAnnounce(peerC1, expected, srv, t)

	peerC2["numwant"] = "1"
	expected = makeResponse(0, 8, peerC1)
	checkAnnounce(peerC2, expected, srv, t)
	checkAnnounce(peerC2, expected, srv, t)

	peerC3["numwant"] = "2"
	expected = makeResponse(0, 9, peerC1, peerC2)
	checkAnnounce(peerC3, expected, srv, t)
	checkAnnounce(peerC3, expected, srv, t)

	expected = makeResponse(0, 10, peerA1, peerA2, peerA3, peerA4, peerB1, peerB2, peerC1, peerC2, peerC3)
	checkAnnounce(peerD1, expected, srv, t)

	peerD2["numwant"] = "1"
	expected = makeResponse(0, 11, peerD1)
	checkAnnounce(peerD2, expected, srv, t)
	checkAnnounce(peerD2, expected, srv, t)
}
// TestCompactAnnounce verifies that compact=1 responses encode peers as raw
// 6-byte ip:port strings instead of bencoded dicts.
func TestCompactAnnounce(t *testing.T) {
	srv, err := setupTracker(nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer srv.Close()

	// 255.9.127.5:1234 packed as 4 IP bytes + 2 big-endian port bytes (0x04d2).
	compact := "\xff\x09\x7f\x05\x04\xd2"
	ip := "255.9.127.5" // Use the same IP for all of them so we don't have to worry about order.

	peer1 := makePeerParams("peer1", false, ip)
	peer1["compact"] = "1"
	peer2 := makePeerParams("peer2", false, ip)
	peer2["compact"] = "1"
	peer3 := makePeerParams("peer3", false, ip)
	peer3["compact"] = "1"

	expected := makeResponse(0, 1)
	expected["peers"] = compact
	checkAnnounce(peer1, expected, srv, t)

	expected = makeResponse(0, 2)
	expected["peers"] = compact
	checkAnnounce(peer2, expected, srv, t)

	// Third announce sees the two earlier peers concatenated.
	expected = makeResponse(0, 3)
	expected["peers"] = compact + compact
	checkAnnounce(peer3, expected, srv, t)
}
// makePeerParams builds announce query parameters for the peer with the given
// ID. seed selects left=0 (seeder) vs left=1 (leecher); an optional first
// extra argument overrides the default IP of 10.0.0.1.
func makePeerParams(id string, seed bool, extra ...string) params {
	// Seeders report nothing left to download.
	var left string
	if seed {
		left = "0"
	} else {
		left = "1"
	}

	// Default IP unless the caller supplied one.
	ip := "10.0.0.1"
	if len(extra) > 0 {
		ip = extra[0]
	}

	return params{
		"info_hash":  infoHash,
		"peer_id":    id,
		"ip":         ip,
		"port":       "1234",
		"uploaded":   "0",
		"downloaded": "0",
		"left":       left,
		"compact":    "0",
		"numwant":    "50",
	}
}
// peerFromParams converts announce parameters into the bencoded peer dict the
// tracker is expected to return for that peer.
func peerFromParams(peer params) bencode.Dict {
	// The port arrives as a string; responses carry it as an integer.
	port, _ := strconv.ParseInt(peer["port"], 10, 64)

	return bencode.Dict{
		"peer id": peer["peer_id"],
		"ip":      peer["ip"],
		"port":    port,
	}
}
// makeResponse assembles the bencoded announce response expected from the
// tracker: counters plus an optional peer list. Passing a single nil peer is
// the sentinel for "omit the peers key entirely".
func makeResponse(seeders, leechers int64, peers ...params) bencode.Dict {
	dict := bencode.Dict{
		"complete":     seeders,
		"incomplete":   leechers,
		"interval":     int64(1800),
		"min interval": int64(900),
	}

	// A lone nil peer means no "peers" entry at all.
	if len(peers) == 1 && peers[0] == nil {
		return dict
	}

	peerList := bencode.List{}
	for _, peer := range peers {
		peerList = append(peerList, peerFromParams(peer))
	}
	dict["peers"] = peerList
	return dict
}
// checkAnnounce announces p against srv and asserts the bencoded response
// matches expected, normalizing peer order on both sides. Failures are
// reported through t; the return value indicates whether the check passed.
func checkAnnounce(p params, expected interface{}, srv *httptest.Server, t *testing.T) bool {
	body, err := announce(p, srv)
	if err != nil {
		t.Error(err)
		return false
	}

	if e, ok := expected.(bencode.Dict); ok {
		sortPeersInResponse(e)
	}

	got, err := bencode.Unmarshal(body)
	if err != nil {
		// Previously this error was silently ignored, making a garbled
		// response indistinguishable from a legitimate mismatch.
		t.Errorf("failed to unmarshal response body %q: %s", body, err)
		return false
	}
	if e, ok := got.(bencode.Dict); ok {
		sortPeersInResponse(e)
	}

	if !reflect.DeepEqual(got, expected) {
		t.Errorf("\ngot:    %#v\nwanted: %#v", got, expected)
		return false
	}
	return true
}

152
http/http.go Normal file
View file

@ -0,0 +1,152 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package http implements a BitTorrent tracker over the HTTP protocol as per
// BEP 3.
package http
import (
"net"
"net/http"
"time"
"github.com/golang/glog"
"github.com/julienschmidt/httprouter"
"github.com/tylerb/graceful"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker"
)
// ResponseHandler is an HTTP handler that returns a status code.
type ResponseHandler func(http.ResponseWriter, *http.Request, httprouter.Params) (int, error)

// Server represents an HTTP serving torrent tracker.
type Server struct {
	config   *config.Config   // tracker-wide configuration
	tracker  *tracker.Tracker // backing tracker logic
	grace    *graceful.Server // set by Serve; used by Stop for shutdown
	stopping bool             // true once graceful shutdown has begun
}
// makeHandler wraps our ResponseHandlers while timing requests, collecting,
// stats, logging, and handling errors.
func makeHandler(handler ResponseHandler) httprouter.Handle {
	return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
		start := time.Now()
		httpCode, err := handler(w, r, p)
		duration := time.Since(start)

		// A handler error or non-200 status becomes the response message.
		var msg string
		if err != nil {
			msg = err.Error()
		} else if httpCode != http.StatusOK {
			msg = http.StatusText(httpCode)
		}

		if len(msg) > 0 {
			http.Error(w, msg, httpCode)
			stats.RecordEvent(stats.ErroredRequest)
		}

		// Log every failure; log successes only at verbosity >= 2 (with the
		// full request URI at >= 3).
		if len(msg) > 0 || glog.V(2) {
			reqString := r.URL.Path + " " + r.RemoteAddr
			if glog.V(3) {
				reqString = r.URL.RequestURI() + " " + r.RemoteAddr
			}

			if len(msg) > 0 {
				glog.Errorf("[HTTP - %9s] %s (%d - %s)", duration, reqString, httpCode, msg)
			} else {
				glog.Infof("[HTTP - %9s] %s (%d)", duration, reqString, httpCode)
			}
		}

		stats.RecordEvent(stats.HandledRequest)
		stats.RecordTiming(stats.ResponseTime, duration)
	}
}
// newRouter returns a router with all the routes.
func newRouter(s *Server) *httprouter.Router {
	router := httprouter.New()

	// Both tracker endpoints go through the timing/stats/logging wrapper.
	router.GET("/announce", makeHandler(s.serveAnnounce))
	router.GET("/scrape", makeHandler(s.serveScrape))

	return router
}
// connState is used by graceful in order to gracefully shutdown. It also
// keeps track of connection stats.
func (s *Server) connState(conn net.Conn, state http.ConnState) {
	switch state {
	case http.StateNew:
		stats.RecordEvent(stats.AcceptedConnection)

	case http.StateClosed:
		stats.RecordEvent(stats.ClosedConnection)

	case http.StateHijacked:
		// No handler in this server hijacks connections, so this transition
		// should be unreachable.
		panic("connection impossibly hijacked")

	// Ignore the following cases.
	case http.StateActive, http.StateIdle:

	default:
		glog.Errorf("Connection transitioned to unknown state %s (%d)", state, state)
	}
}
// Serve runs an HTTP server, blocking until the server has shut down.
func (s *Server) Serve() {
	glog.V(0).Info("Starting HTTP on ", s.config.HTTPConfig.ListenAddr)

	if s.config.HTTPConfig.ListenLimit != 0 {
		glog.V(0).Info("Limiting connections to ", s.config.HTTPConfig.ListenLimit)
	}

	grace := &graceful.Server{
		Timeout:          s.config.HTTPConfig.RequestTimeout.Duration,
		ConnState:        s.connState,
		ListenLimit:      s.config.HTTPConfig.ListenLimit,
		NoSignalHandling: true,
		Server: &http.Server{
			Addr:         s.config.HTTPConfig.ListenAddr,
			Handler:      newRouter(s),
			ReadTimeout:  s.config.HTTPConfig.ReadTimeout.Duration,
			WriteTimeout: s.config.HTTPConfig.WriteTimeout.Duration,
		},
	}

	s.grace = grace
	grace.SetKeepAlivesEnabled(false)
	grace.ShutdownInitiated = func() { s.stopping = true }

	if err := grace.ListenAndServe(); err != nil {
		// An *net.OpError with Op == "accept" is treated as the normal result
		// of the listener closing during shutdown; anything else is a real
		// failure. (The `ok &&` in the second clause is redundant but kept.)
		if opErr, ok := err.(*net.OpError); !ok || (ok && opErr.Op != "accept") {
			glog.Errorf("Failed to gracefully run HTTP server: %s", err.Error())
			return
		}
	}

	glog.Info("HTTP server shut down cleanly")
}
// Stop cleanly shuts down the server.
//
// NOTE(review): s.stopping is read here and written from ShutdownInitiated
// (another goroutine) without synchronization — confirm Stop is only invoked
// from a single goroutine.
func (s *Server) Stop() {
	if !s.stopping {
		s.grace.Stop(s.grace.Timeout)
	}
}
// NewServer returns a new HTTP server for a given configuration and tracker.
func NewServer(cfg *config.Config, tkr *tracker.Tracker) *Server {
	srv := &Server{
		config:  cfg,
		tracker: tkr,
	}
	return srv
}

90
http/http_test.go Normal file
View file

@ -0,0 +1,90 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"sort"
"github.com/chihaya/bencode"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker"
)
// params holds announce/scrape query parameters as simple key/value strings.
type params map[string]string

// infoHash is the raw 20-byte infohash shared by every test torrent.
var infoHash = string([]byte{0x89, 0xd4, 0xbc, 0x52, 0x11, 0x16, 0xca, 0x1d, 0x42, 0xa2, 0xf3, 0x0d, 0x1f, 0x27, 0x4d, 0x94, 0xe4, 0x68, 0x1d, 0xaf})

// init installs a stats collector so handlers can record events during tests.
func init() {
	stats.DefaultStats = stats.New(config.StatsConfig{})
}
// setupTracker builds an httptest server around cfg/tkr, substituting the
// default config and a fresh tracker for nil arguments.
func setupTracker(cfg *config.Config, tkr *tracker.Tracker) (*httptest.Server, error) {
	if cfg == nil {
		cfg = &config.DefaultConfig
	}
	if tkr == nil {
		var err error
		tkr, err = tracker.New(cfg)
		if err != nil {
			return nil, err
		}
	}
	return createServer(tkr, cfg)
}
// createServer wraps the tracker's router in an httptest server.
func createServer(tkr *tracker.Tracker, cfg *config.Config) (*httptest.Server, error) {
	return httptest.NewServer(newRouter(&Server{config: cfg, tracker: tkr})), nil
}
// announce issues a GET /announce with the given parameters and returns the
// raw response body.
func announce(p params, srv *httptest.Server) ([]byte, error) {
	query := url.Values{}
	for key, value := range p {
		query.Add(key, value)
	}

	body, _, err := fetchPath(srv.URL + "/announce?" + query.Encode())
	return body, err
}
func fetchPath(path string) ([]byte, int, error) {
response, err := http.Get(path)
if err != nil {
return nil, 0, err
}
body, err := ioutil.ReadAll(response.Body)
response.Body.Close()
return body, response.StatusCode, err
}
// peerList adapts a bencode.List of peer dicts to sort.Interface, ordering by
// the "peer id" string.
type peerList bencode.List

func (p peerList) Len() int {
	return len(p)
}

func (p peerList) Less(i, j int) bool {
	return p[i].(bencode.Dict)["peer id"].(string) < p[j].(bencode.Dict)["peer id"].(string)
}

func (p peerList) Swap(i, j int) {
	p[i], p[j] = p[j], p[i]
}

// sortPeersInResponse canonicalizes peer order in a response dict so tests
// can compare responses with reflect.DeepEqual.
func sortPeersInResponse(dict bencode.Dict) {
	if peers, ok := dict["peers"].(bencode.List); ok {
		sort.Stable(peerList(peers))
	}
}

112
http/query/query.go Normal file
View file

@ -0,0 +1,112 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
// Package query implements a faster single-purpose URL Query parser.
package query
import (
"errors"
"net/url"
"strconv"
"strings"
)
// Query represents a parsed URL.Query.
type Query struct {
	Infohashes []string          // populated only when info_hash occurs more than once
	Params     map[string]string // lowercased key -> last value seen
}
// New parses a raw url query.
//
// The scanner walks the string once, tracking key/value byte ranges and
// unescaping each pair as it completes. Keys are lowercased; when
// "info_hash" occurs more than once, all of its values are collected into
// Infohashes.
func New(query string) (*Query, error) {
	var (
		// Inclusive byte ranges of the key/value currently being scanned.
		keyStart, keyEnd int
		valStart, valEnd int

		// First info_hash value, promoted into Infohashes on a second one.
		firstInfohash string

		onKey       = true  // currently scanning a key (vs. a value)
		hasInfohash = false // at least one info_hash key seen

		q = &Query{
			Infohashes: nil,
			Params:     make(map[string]string),
		}
	)

	for i, length := 0, len(query); i < length; i++ {
		separator := query[i] == '&' || query[i] == ';' || query[i] == '?'
		last := i == length-1

		if separator || last {
			// An empty key (separator while still on a key) is skipped.
			if onKey && !last {
				keyStart = i + 1
				continue
			}

			// A value running to end-of-string includes the final byte.
			if last && !separator && !onKey {
				valEnd = i
			}

			keyStr, err := url.QueryUnescape(query[keyStart : keyEnd+1])
			if err != nil {
				return nil, err
			}

			// valEnd == 0 means no value bytes were seen for this key.
			var valStr string
			if valEnd > 0 {
				valStr, err = url.QueryUnescape(query[valStart : valEnd+1])
				if err != nil {
					return nil, err
				}
			}
			q.Params[strings.ToLower(keyStr)] = valStr

			if keyStr == "info_hash" {
				if hasInfohash {
					// Multiple infohashes
					if q.Infohashes == nil {
						q.Infohashes = []string{firstInfohash}
					}
					q.Infohashes = append(q.Infohashes, valStr)
				} else {
					firstInfohash = valStr
					hasInfohash = true
				}
			}

			// Reset scanner state for the next pair.
			valEnd = 0
			onKey = true
			keyStart = i + 1
		} else if query[i] == '=' {
			onKey = false
			valStart = i + 1
			valEnd = 0
		} else if onKey {
			keyEnd = i
		} else {
			valEnd = i
		}
	}

	return q, nil
}
// Uint64 is a helper to obtain a uint of any length from a Query. After being
// called, you can safely cast the uint64 to your desired length.
func (q *Query) Uint64(key string) (uint64, error) {
	raw, ok := q.Params[key]
	if !ok {
		return 0, errors.New("value does not exist for key: " + key)
	}

	parsed, err := strconv.ParseUint(raw, 10, 64)
	if err != nil {
		// Normalize to zero on any parse failure (including range errors,
		// where ParseUint itself would return MaxUint64).
		return 0, err
	}
	return parsed, nil
}

100
http/query/query_test.go Normal file
View file

@ -0,0 +1,100 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package query
import (
"net/url"
"testing"
)
var (
	baseAddr     = "https://www.subdomain.tracker.com:80/"
	testInfoHash = "01234567890123456789"
	testPeerID   = "-TEST01-6wfG2wk6wWLc"

	// ValidAnnounceArguments enumerates well-formed announce parameter sets,
	// from the minimal required fields up to full key/trackerid variants.
	ValidAnnounceArguments = []url.Values{
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "numwant": {"28"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"stopped"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "ip": {"192.168.0.1"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "event": {"started"}, "numwant": {"13"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "no_peer_id": {"1"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {testPeerID}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {"%3Ckey%3A+0x90%3E"}, "port": {"6881"}, "downloaded": {"1234"}, "left": {"4321"}, "compact": {"0"}, "no_peer_id": {"1"}, "key": {"peerKey"}, "trackerid": {"trackerId"}},
		url.Values{"info_hash": {testInfoHash}, "peer_id": {"%3Ckey%3A+0x90%3E"}, "compact": {"1"}},
		url.Values{"info_hash": {""}, "peer_id": {""}, "compact": {""}},
	}

	// InvalidQueries contains raw query strings whose percent-escapes cannot
	// be decoded.
	InvalidQueries = []string{
		baseAddr + "announce/?" + "info_hash=%0%a",
	}
)
// mapArrayEqual reports whether boxed (url.Values-style, expected to hold
// exactly one value per key) contains the same pairs as unboxed.
func mapArrayEqual(boxed map[string][]string, unboxed map[string]string) bool {
	if len(boxed) != len(unboxed) {
		return false
	}

	for key, values := range boxed {
		// Always expect box to hold only one element
		if len(values) != 1 {
			return false
		}
		if values[0] != unboxed[key] {
			return false
		}
	}
	return true
}
// TestValidQueries ensures every well-formed announce query parses without
// error and that the parsed params match the url.Values input exactly.
func TestValidQueries(t *testing.T) {
	for parseIndex, parseVal := range ValidAnnounceArguments {
		parsedQueryObj, err := New(baseAddr + "announce/?" + parseVal.Encode())
		if err != nil {
			t.Error(err)
		}
		if !mapArrayEqual(parseVal, parsedQueryObj.Params) {
			// Fixed typo in the failure message ("Recieved" -> "Received").
			t.Errorf("Incorrect parse at item %d.\n Expected=%v\n Received=%v\n", parseIndex, parseVal, parsedQueryObj.Params)
		}
	}
}
// TestInvalidQueries verifies that undecodable queries produce an error and a
// nil Query.
func TestInvalidQueries(t *testing.T) {
	for parseIndex, parseStr := range InvalidQueries {
		parsedQueryObj, err := New(parseStr)
		if err == nil {
			t.Error("Should have produced error", parseIndex)
		}
		if parsedQueryObj != nil {
			t.Error("Should be nil after error", parsedQueryObj, parseIndex)
		}
	}
}
// BenchmarkParseQuery measures this package's parser across all valid inputs.
func BenchmarkParseQuery(b *testing.B) {
	for bCount := 0; bCount < b.N; bCount++ {
		for parseIndex, parseStr := range ValidAnnounceArguments {
			parsedQueryObj, err := New(baseAddr + "announce/?" + parseStr.Encode())
			if err != nil {
				b.Error(err, parseIndex)
				b.Log(parsedQueryObj)
			}
		}
	}
}
// BenchmarkURLParseQuery measures net/url's parser on the same inputs, as a
// baseline for BenchmarkParseQuery.
//
// NOTE(review): this feeds the full URL (scheme, host, path) to
// url.ParseQuery, not just the query portion — confirm the comparison with
// New is intended to include that extra text.
func BenchmarkURLParseQuery(b *testing.B) {
	for bCount := 0; bCount < b.N; bCount++ {
		for parseIndex, parseStr := range ValidAnnounceArguments {
			parsedQueryObj, err := url.ParseQuery(baseAddr + "announce/?" + parseStr.Encode())
			if err != nil {
				b.Error(err, parseIndex)
				b.Log(parsedQueryObj)
			}
		}
	}
}

46
http/routes.go Normal file
View file

@ -0,0 +1,46 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"net/http"
"github.com/julienschmidt/httprouter"
"github.com/chihaya/chihaya/stats"
"github.com/chihaya/chihaya/tracker/models"
)
// handleTorrentError maps a tracker-layer error onto an HTTP status. Public
// (client-caused) errors are written into the response body and still report
// 200; anything else surfaces as a 500.
func handleTorrentError(err error, w *Writer) (int, error) {
	if err == nil {
		return http.StatusOK, nil
	}

	if models.IsPublicError(err) {
		w.WriteError(err)
		stats.RecordEvent(stats.ClientError)
		return http.StatusOK, nil
	}

	return http.StatusInternalServerError, err
}
// serveAnnounce decodes an announce request, delegates it to the tracker, and
// maps any error onto an HTTP status via handleTorrentError.
func (s *Server) serveAnnounce(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
	writer := &Writer{w}
	ann, err := s.newAnnounce(r, p)
	if err != nil {
		return handleTorrentError(err, writer)
	}
	return handleTorrentError(s.tracker.HandleAnnounce(ann, writer), writer)
}
// serveScrape decodes a scrape request, delegates it to the tracker, and maps
// any error onto an HTTP status via handleTorrentError.
func (s *Server) serveScrape(w http.ResponseWriter, r *http.Request, p httprouter.Params) (int, error) {
	writer := &Writer{w}
	scrape, err := s.newScrape(r, p)
	if err != nil {
		return handleTorrentError(err, writer)
	}
	return handleTorrentError(s.tracker.HandleScrape(scrape, writer), writer)
}

98
http/scrape_test.go Normal file
View file

@ -0,0 +1,98 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"io/ioutil"
"net/http"
"net/http/httptest"
"net/url"
"reflect"
"testing"
"github.com/chihaya/bencode"
)
// TestPublicScrape walks a torrent through announce transitions and checks
// the scrape counters after each step.
func TestPublicScrape(t *testing.T) {
	srv, err := setupTracker(nil, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer srv.Close()

	scrapeParams := params{"info_hash": infoHash}

	// Add one seeder.
	peer := makePeerParams("peer1", true)
	announce(peer, srv)
	checkScrape(scrapeParams, makeScrapeResponse(1, 0, 0), srv, t)

	// Add another seeder.
	peer = makePeerParams("peer2", true)
	announce(peer, srv)
	checkScrape(scrapeParams, makeScrapeResponse(2, 0, 0), srv, t)

	// Add a leecher.
	peer = makePeerParams("peer3", false)
	announce(peer, srv)
	checkScrape(scrapeParams, makeScrapeResponse(2, 1, 0), srv, t)

	// Remove seeder.
	peer = makePeerParams("peer1", true)
	peer["event"] = "stopped"
	announce(peer, srv)
	checkScrape(scrapeParams, makeScrapeResponse(1, 1, 0), srv, t)

	// Complete torrent.
	peer = makePeerParams("peer3", true)
	peer["event"] = "complete"
	announce(peer, srv)
	checkScrape(scrapeParams, makeScrapeResponse(2, 0, 0), srv, t)
}
// makeScrapeResponse builds the bencoded scrape body the tracker is expected
// to return for infoHash with the given swarm counters.
func makeScrapeResponse(seeders, leechers, downloaded int64) bencode.Dict {
	stats := bencode.Dict{
		"complete":   seeders,
		"incomplete": leechers,
		"downloaded": downloaded,
	}
	return bencode.Dict{
		"files": bencode.Dict{infoHash: stats},
	}
}
// checkScrape issues GET /scrape with the given params against srv and
// compares the decoded bencode body to expected. Any transport, read, or
// decode failure is reported on t; the return value reports whether the
// scrape matched.
func checkScrape(p params, expected interface{}, srv *httptest.Server, t *testing.T) bool {
	values := &url.Values{}
	for k, v := range p {
		values.Add(k, v)
	}

	response, err := http.Get(srv.URL + "/scrape?" + values.Encode())
	if err != nil {
		t.Error(err)
		return false
	}

	body, err := ioutil.ReadAll(response.Body)
	response.Body.Close()
	if err != nil {
		t.Error(err)
		return false
	}

	got, err := bencode.Unmarshal(body)
	// Bug fix: the decode error was previously ignored, so a malformed body
	// surfaced as a confusing DeepEqual mismatch against a nil value instead
	// of being reported as a decode failure.
	if err != nil {
		t.Error(err)
		return false
	}

	if !reflect.DeepEqual(got, expected) {
		t.Errorf("\ngot: %#v\nwanted: %#v", got, expected)
		return false
	}
	return true
}

197
http/tracker.go Normal file
View file

@ -0,0 +1,197 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"errors"
"net"
"net/http"
"strconv"
"github.com/julienschmidt/httprouter"
"github.com/chihaya/chihaya/config"
"github.com/chihaya/chihaya/http/query"
"github.com/chihaya/chihaya/tracker/models"
)
// newAnnounce parses an HTTP request and generates a models.Announce.
//
// It returns models.ErrMalformedRequest whenever a required parameter is
// missing or fails to parse.
func (s *Server) newAnnounce(r *http.Request, p httprouter.Params) (*models.Announce, error) {
	q, err := query.New(r.URL.RawQuery)
	if err != nil {
		return nil, err
	}

	// Compact responses are the default; only an explicit "compact=0" opts out.
	compact := q.Params["compact"] != "0"
	event := q.Params["event"] // empty string when the parameter is absent
	numWant := requestedPeerCount(q, s.config.NumWantFallback)

	infohash, exists := q.Params["info_hash"]
	if !exists {
		return nil, models.ErrMalformedRequest
	}

	peerID, exists := q.Params["peer_id"]
	if !exists {
		return nil, models.ErrMalformedRequest
	}

	// A JWT is only mandatory when the tracker is configured with a JWK Set.
	jwt, exists := q.Params["jwt"]
	if s.config.JWKSetURI != "" && !exists {
		return nil, models.ErrMalformedRequest
	}

	port, err := q.Uint64("port")
	if err != nil {
		return nil, models.ErrMalformedRequest
	}
	// Bug fix: ports outside 1-65535 were previously truncated silently by
	// the uint16 conversion below; reject them as malformed instead.
	if port == 0 || port > 65535 {
		return nil, models.ErrMalformedRequest
	}

	left, err := q.Uint64("left")
	if err != nil {
		return nil, models.ErrMalformedRequest
	}

	ipv4, ipv6, err := requestedIP(q, r, &s.config.NetConfig)
	if err != nil {
		return nil, models.ErrMalformedRequest
	}

	// NOTE(review): unkeyed literals — assumes Endpoint's field order is
	// (IP, Port); confirm against models.Endpoint before reordering.
	ipv4Endpoint := models.Endpoint{ipv4, uint16(port)}
	ipv6Endpoint := models.Endpoint{ipv6, uint16(port)}

	downloaded, err := q.Uint64("downloaded")
	if err != nil {
		return nil, models.ErrMalformedRequest
	}

	uploaded, err := q.Uint64("uploaded")
	if err != nil {
		return nil, models.ErrMalformedRequest
	}

	return &models.Announce{
		Config:     s.config,
		Compact:    compact,
		Downloaded: downloaded,
		Event:      event,
		IPv4:       ipv4Endpoint,
		IPv6:       ipv6Endpoint,
		Infohash:   infohash,
		Left:       left,
		NumWant:    numWant,
		PeerID:     peerID,
		Uploaded:   uploaded,
		JWT:        jwt,
	}, nil
}
// newScrape parses an HTTP request and generates a models.Scrape.
func (s *Server) newScrape(r *http.Request, p httprouter.Params) (*models.Scrape, error) {
	q, err := query.New(r.URL.RawQuery)
	if err != nil {
		return nil, err
	}

	// Fall back to a single info_hash parameter when the query carried no
	// multi-valued infohash list.
	if q.Infohashes == nil {
		single, exists := q.Params["info_hash"]
		if !exists {
			// There aren't any infohashes at all.
			return nil, models.ErrMalformedRequest
		}
		q.Infohashes = []string{single}
	}

	return &models.Scrape{
		Config:     s.config,
		Infohashes: q.Infohashes,
	}, nil
}
// requestedPeerCount returns the number of peers the client asked for via
// the "numwant" parameter, or fallback when the parameter is absent or
// invalid.
func requestedPeerCount(q *query.Query, fallback int) int {
	numWantStr, exists := q.Params["numwant"]
	if !exists {
		return fallback
	}

	numWant, err := strconv.Atoi(numWantStr)
	// Bug fix: a negative numwant (which some clients send to mean "use the
	// default") previously propagated as-is; treat it like an absent value.
	if err != nil || numWant < 0 {
		return fallback
	}
	return numWant
}
// requestedIP returns the IP address for a request. If there are multiple in
// the request, one IPv4 and one IPv6 will be returned.
//
// Precedence (each source only fills a family that is still empty, via
// getIPs): query params "ip"/"ipv4"/"ipv6" when spoofing is allowed, then
// the configured real-IP header, otherwise the connection's RemoteAddr.
func requestedIP(q *query.Query, r *http.Request, cfg *config.NetConfig) (v4, v6 net.IP, err error) {
	var done bool

	// Client-supplied addresses are only honored when explicitly enabled.
	if cfg.AllowIPSpoofing {
		if str, ok := q.Params["ip"]; ok {
			if v4, v6, done = getIPs(str, v4, v6, cfg); done {
				return
			}
		}

		if str, ok := q.Params["ipv4"]; ok {
			if v4, v6, done = getIPs(str, v4, v6, cfg); done {
				return
			}
		}

		if str, ok := q.Params["ipv6"]; ok {
			if v4, v6, done = getIPs(str, v4, v6, cfg); done {
				return
			}
		}
	}

	if cfg.RealIPHeader != "" {
		// When a real-IP header is configured it REPLACES RemoteAddr as the
		// source of truth; only the first header value is consulted.
		if xRealIPs, ok := r.Header[cfg.RealIPHeader]; ok {
			if v4, v6, done = getIPs(string(xRealIPs[0]), v4, v6, cfg); done {
				return
			}
		}
	} else {
		// An empty RemoteAddr (e.g. in tests) defaults to loopback.
		if r.RemoteAddr == "" && v4 == nil {
			if v4, v6, done = getIPs("127.0.0.1", v4, v6, cfg); done {
				return
			}
		}

		if v4, v6, done = getIPs(r.RemoteAddr, v4, v6, cfg); done {
			return
		}
	}

	// Nothing parsed from any source.
	if v4 == nil && v6 == nil {
		err = errors.New("failed to parse IP address")
	}
	return
}
// getIPs parses ipstr (either a bare IP or a host:port pair) and fills in
// whichever of ipv4/ipv6 is still nil with the parsed address.
//
// The returned bool reports whether the caller can stop looking: with
// DualStackedPeers both families must be filled, otherwise one is enough.
func getIPs(ipstr string, ipv4, ipv6 net.IP, cfg *config.NetConfig) (net.IP, net.IP, bool) {
	// Strip an optional port; on failure treat the whole string as the host.
	host, _, err := net.SplitHostPort(ipstr)
	if err != nil {
		host = ipstr
	}

	if ip := net.ParseIP(host); ip != nil {
		ipTo4 := ip.To4()
		// A non-nil To4() means the address is representable as IPv4.
		if ipv4 == nil && ipTo4 != nil {
			ipv4 = ipTo4
		} else if ipv6 == nil && ipTo4 == nil {
			ipv6 = ip
		}
	}

	var done bool
	if cfg.DualStackedPeers {
		done = ipv4 != nil && ipv6 != nil
	} else {
		done = ipv4 != nil || ipv6 != nil
	}

	return ipv4, ipv6, done
}

118
http/writer.go Normal file
View file

@ -0,0 +1,118 @@
// Copyright 2015 The Chihaya Authors. All rights reserved.
// Use of this source code is governed by the BSD 2-Clause license,
// which can be found in the LICENSE file.
package http
import (
"bytes"
"net/http"
"github.com/chihaya/bencode"
"github.com/chihaya/chihaya/tracker/models"
)
// Writer implements the tracker.Writer interface for the HTTP protocol.
// It embeds the http.ResponseWriter so bencoded responses are written
// directly to the underlying connection.
type Writer struct {
	http.ResponseWriter
}
// WriteError writes a bencode dict with a failure reason.
func (w *Writer) WriteError(err error) error {
	failure := bencode.Dict{"failure reason": err.Error()}
	return bencode.NewEncoder(w).Encode(failure)
}
// WriteAnnounce writes a bencode dict representation of an AnnounceResponse.
//
// Compact responses use the BEP 23 byte-string "peers"/"peers6" encoding;
// non-compact responses use a single list of per-peer dicts.
func (w *Writer) WriteAnnounce(res *models.AnnounceResponse) error {
	dict := bencode.Dict{
		"complete":     res.Complete,
		"incomplete":   res.Incomplete,
		"interval":     res.Interval,
		"min interval": res.MinInterval,
	}

	if res.Compact {
		if res.IPv4Peers != nil {
			dict["peers"] = compactPeers(false, res.IPv4Peers)
		}
		if res.IPv6Peers != nil {
			compact := compactPeers(true, res.IPv6Peers)

			// Don't bother writing the IPv6 field if there is no value.
			if len(compact) > 0 {
				dict["peers6"] = compact
			}
		}
	} else if res.IPv4Peers != nil || res.IPv6Peers != nil {
		// Non-compact form merges both families into one "peers" list.
		dict["peers"] = peersList(res.IPv4Peers, res.IPv6Peers)
	}

	bencoder := bencode.NewEncoder(w)
	return bencoder.Encode(dict)
}
// WriteScrape writes a bencode dict representation of a ScrapeResponse.
func (w *Writer) WriteScrape(res *models.ScrapeResponse) error {
	return bencode.NewEncoder(w).Encode(bencode.Dict{
		"files": filesDict(res.Files),
	})
}
// compactPeers serializes peers into BEP 23 compact form: each peer is its
// raw IP bytes followed by the port in network byte order.
//
// NOTE(review): the IPv4 and IPv6 branches of the original were
// byte-for-byte identical, so they are collapsed here; the ipv6 flag is kept
// only for interface compatibility. This relies on peer.IP already being
// stored with the correct width (4 or 16 bytes) — confirm at the call sites.
func compactPeers(ipv6 bool, peers models.PeerList) []byte {
	var buf bytes.Buffer
	for _, peer := range peers {
		buf.Write(peer.IP)
		buf.Write([]byte{byte(peer.Port >> 8), byte(peer.Port & 0xff)})
	}
	return buf.Bytes()
}
// peersList converts the IPv4 and IPv6 peer lists into the non-compact
// bencoded representation: one dict per peer, IPv4 peers first.
func peersList(ipv4s, ipv6s models.PeerList) (peers []bencode.Dict) {
	for i := range ipv4s {
		peers = append(peers, peerDict(&ipv4s[i], false))
	}
	for i := range ipv6s {
		peers = append(peers, peerDict(&ipv6s[i], true))
	}
	return peers
}
// peerDict renders a single peer as a non-compact announce dict.
//
// NOTE(review): the ipv6 flag is currently unused — IPv4 and IPv6 peers are
// rendered identically; confirm whether it is reserved for future use.
func peerDict(peer *models.Peer, ipv6 bool) bencode.Dict {
	return bencode.Dict{
		"ip": peer.IP.String(),
		"peer id": peer.ID,
		"port": peer.Port,
	}
}
// filesDict maps each torrent's infohash to its scrape statistics dict.
func filesDict(torrents []*models.Torrent) bencode.Dict {
	files := bencode.NewDict()
	for _, t := range torrents {
		files[t.Infohash] = torrentDict(t)
	}
	return files
}
// torrentDict renders one torrent's swarm counters in scrape-response form.
func torrentDict(t *models.Torrent) bencode.Dict {
	return bencode.Dict{
		"complete":   t.Seeders.Len(),
		"incomplete": t.Leechers.Len(),
		"downloaded": t.Snatches,
	}
}

View file

@ -1,107 +0,0 @@
// Package clientapproval implements a Hook that fails an Announce based on a
// whitelist or blacklist of BitTorrent client IDs.
package clientapproval
import (
"context"
"errors"
"fmt"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
)
// Name is the name by which this middleware is registered with Chihaya.
const Name = "client approval"
func init() {
middleware.RegisterDriver(Name, driver{})
}
var _ middleware.Driver = driver{}
type driver struct{}
// NewHook builds a client-approval hook from YAML-encoded middleware options.
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
	var cfg Config
	if err := yaml.Unmarshal(optionBytes, &cfg); err != nil {
		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
	}
	return NewHook(cfg)
}
// ErrClientUnapproved is the error returned when a client's PeerID is invalid.
var ErrClientUnapproved = bittorrent.ClientError("unapproved client")
// Config represents all the values required by this middleware to validate
// peers based on their BitTorrent client ID.
//
// Whitelist and Blacklist are mutually exclusive; NewHook rejects a config
// that sets both. Each entry must be exactly 6 bytes long.
type Config struct {
	Whitelist []string `yaml:"whitelist"`
	Blacklist []string `yaml:"blacklist"`
}
type hook struct {
approved map[bittorrent.ClientID]struct{}
unapproved map[bittorrent.ClientID]struct{}
}
// NewHook returns an instance of the client approval middleware.
func NewHook(cfg Config) (middleware.Hook, error) {
h := &hook{
approved: make(map[bittorrent.ClientID]struct{}),
unapproved: make(map[bittorrent.ClientID]struct{}),
}
if len(cfg.Whitelist) > 0 && len(cfg.Blacklist) > 0 {
return nil, fmt.Errorf("using both whitelist and blacklist is invalid")
}
for _, cidString := range cfg.Whitelist {
cidBytes := []byte(cidString)
if len(cidBytes) != 6 {
return nil, errors.New("client ID " + cidString + " must be 6 bytes")
}
var cid bittorrent.ClientID
copy(cid[:], cidBytes)
h.approved[cid] = struct{}{}
}
for _, cidString := range cfg.Blacklist {
cidBytes := []byte(cidString)
if len(cidBytes) != 6 {
return nil, errors.New("client ID " + cidString + " must be 6 bytes")
}
var cid bittorrent.ClientID
copy(cid[:], cidBytes)
h.unapproved[cid] = struct{}{}
}
return h, nil
}
// HandleAnnounce rejects announces whose client ID is missing from the
// whitelist or present in the blacklist, depending on configuration.
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
	clientID := bittorrent.NewClientID(req.Peer.ID)

	// Whitelist mode: absence means rejection.
	if len(h.approved) > 0 {
		if _, allowed := h.approved[clientID]; !allowed {
			return ctx, ErrClientUnapproved
		}
	}

	// Blacklist mode: presence means rejection.
	if len(h.unapproved) > 0 {
		if _, banned := h.unapproved[clientID]; banned {
			return ctx, ErrClientUnapproved
		}
	}

	return ctx, nil
}
// HandleScrape is a pass-through: scrape requests are never filtered by
// client ID.
func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
	// Scrapes don't require any protection.
	return ctx, nil
}

View file

@ -1,75 +0,0 @@
package clientapproval
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
// cases is the table for TestHandleAnnounce: each entry pairs a hook Config
// with a 20-byte peer ID and the expected approval outcome.
var cases = []struct {
	cfg      Config
	peerID   string
	approved bool
}{
	// Client ID is whitelisted
	{
		Config{
			Whitelist: []string{"010203"},
		},
		"01020304050607080900",
		true,
	},
	// Client ID is not whitelisted
	{
		Config{
			Whitelist: []string{"010203"},
		},
		"10203040506070809000",
		false,
	},
	// Client ID is not blacklisted
	{
		Config{
			Blacklist: []string{"010203"},
		},
		"00000000001234567890",
		true,
	},
	// Client ID is blacklisted
	{
		Config{
			Blacklist: []string{"123456"},
		},
		"12345678900000000000",
		false,
	},
}
// TestHandleAnnounce runs every table case through a freshly-built hook and
// checks whether the announce is approved or rejected.
func TestHandleAnnounce(t *testing.T) {
	for _, tt := range cases {
		t.Run(fmt.Sprintf("testing peerid %s", tt.peerID), func(t *testing.T) {
			h, err := NewHook(tt.cfg)
			require.Nil(t, err)

			ctx := context.Background()
			req := &bittorrent.AnnounceRequest{}
			resp := &bittorrent.AnnounceResponse{}

			peerid := bittorrent.PeerIDFromString(tt.peerID)
			req.Peer.ID = peerid

			nctx, err := h.HandleAnnounce(ctx, req, resp)
			require.Equal(t, ctx, nctx)
			// Idiom fixes: drop the `== true` comparison, and pass the
			// expected value before the actual one per testify's
			// (t, expected, actual) convention so failure output is labeled
			// correctly.
			if tt.approved {
				require.NotEqual(t, ErrClientUnapproved, err)
			} else {
				require.Equal(t, ErrClientUnapproved, err)
			}
		})
	}
}

View file

@ -1,84 +0,0 @@
// Package fixedpeers implements a Hook that
//appends a fixed peer to every Announce request
package fixedpeers
import (
"context"
"fmt"
"net"
"strconv"
"strings"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
)
// Name is the name by which this middleware is registered with Chihaya.
const Name = "fixed peers"
func init() {
middleware.RegisterDriver(Name, driver{})
}
var _ middleware.Driver = driver{}
type driver struct{}
// NewHook builds a fixed-peers hook from YAML-encoded middleware options.
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
	var cfg Config
	if err := yaml.Unmarshal(optionBytes, &cfg); err != nil {
		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
	}
	return NewHook(cfg)
}
// Config represents all the values required by this middleware to append
// fixed peers to announce responses. Each entry is an "ip:port" string with
// an IPv4 address.
type Config struct {
	FixedPeers []string `yaml:"fixed_peers"`
}
type hook struct {
peers []bittorrent.Peer
}
// NewHook returns an instance of the fixed peers middleware.
//
// Each entry of cfg.FixedPeers must be "ipv4:port", e.g. "8.8.8.8:4040".
// Bug fixes: a malformed entry now yields an error instead of crashing —
// the original panicked on an unparsable IP and hit an index-out-of-range
// on an entry with no ":" separator.
func NewHook(cfg Config) (middleware.Hook, error) {
	var peers []bittorrent.Peer
	for _, peerString := range cfg.FixedPeers {
		parts := strings.Split(peerString, ":")
		if len(parts) != 2 {
			return nil, fmt.Errorf("invalid fixed peer %q: want ip:port", peerString)
		}

		port, err := strconv.Atoi(parts[1])
		if err != nil {
			return nil, err
		}
		if port < 0 || port > 65535 {
			return nil, fmt.Errorf("invalid fixed peer port %d", port)
		}

		ip := net.ParseIP(parts[0]).To4()
		if ip == nil {
			return nil, fmt.Errorf("invalid IPv4 address in fixed_peers: %q", parts[0])
		}

		peers = append(peers,
			bittorrent.Peer{
				ID:   bittorrent.PeerID{0},
				Port: uint16(port),
				IP:   bittorrent.IP{IP: ip},
			})
	}

	return &hook{peers: peers}, nil
}
// HandleAnnounce appends every configured fixed peer to the IPv4 peer list
// and counts each one as an additional seeder.
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
	for _, fixed := range h.peers {
		resp.IPv4Peers = append(resp.IPv4Peers, fixed)
		resp.Complete++
	}
	return ctx, nil
}
// HandleScrape is a pass-through: fixed peers are not reflected in scrapes.
func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
	// Scrapes don't require any protection.
	return ctx, nil
}

View file

@ -1,47 +0,0 @@
package fixedpeers
import (
"context"
"encoding/hex"
"net"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
// TestAppendFixedPeer verifies that a hook configured with two fixed peers
// appends exactly those peers (in config order) to an announce response.
func TestAppendFixedPeer(t *testing.T) {
	conf := Config{
		FixedPeers: []string{"8.8.8.8:4040", "1.1.1.1:111"},
	}
	h, err := NewHook(conf)
	require.Nil(t, err)
	ctx := context.Background()
	req := &bittorrent.AnnounceRequest{}
	resp := &bittorrent.AnnounceResponse{}
	hashbytes, err := hex.DecodeString("3000000000000000000000000000000000000000")
	require.Nil(t, err)
	hashinfo := bittorrent.InfoHashFromBytes(hashbytes)
	req.InfoHash = hashinfo
	nctx, err := h.HandleAnnounce(ctx, req, resp)
	require.Equal(t, ctx, nctx)
	peers := []bittorrent.Peer{
		bittorrent.Peer{
			ID:   bittorrent.PeerID{0},
			Port: 4040,
			IP:   bittorrent.IP{net.ParseIP("8.8.8.8"), bittorrent.IPv4},
		},
		bittorrent.Peer{
			ID:   bittorrent.PeerID{0},
			Port: 111,
			IP:   bittorrent.IP{net.ParseIP("1.1.1.1"), bittorrent.IPv4},
		},
	}
	require.Equal(t, peers, resp.IPv4Peers)
}

View file

@ -1,148 +0,0 @@
package middleware
import (
"context"
"errors"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/storage"
)
// Hook abstracts the concept of anything that needs to interact with a
// BitTorrent client's request and response to a BitTorrent tracker.
// PreHooks and PostHooks both use the same interface.
//
// A Hook can implement stop.Stopper if clean shutdown is required.
type Hook interface {
HandleAnnounce(context.Context, *bittorrent.AnnounceRequest, *bittorrent.AnnounceResponse) (context.Context, error)
HandleScrape(context.Context, *bittorrent.ScrapeRequest, *bittorrent.ScrapeResponse) (context.Context, error)
}
type skipSwarmInteraction struct{}
// SkipSwarmInteractionKey is a key for the context of an Announce to control
// whether the swarm interaction middleware should run.
// Any non-nil value set for this key will cause the swarm interaction
// middleware to skip.
var SkipSwarmInteractionKey = skipSwarmInteraction{}
type swarmInteractionHook struct {
store storage.PeerStore
}
// HandleAnnounce updates the peer store to reflect this announce: removing
// stopped peers, graduating completed leechers, and inserting seeders or
// leechers otherwise. It can be disabled per-request via
// SkipSwarmInteractionKey.
func (h *swarmInteractionHook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (_ context.Context, err error) {
	if ctx.Value(SkipSwarmInteractionKey) != nil {
		return ctx, nil
	}

	// Cases are order-dependent: the port filter must run first, and
	// Completed must be matched before the generic Left == 0 case.
	switch {
	case req.Port < 100:
		// NOTE(review): announces with ports below 100 are accepted but never
		// stored — presumably filtering junk/privileged ports; confirm intent.
		return ctx, nil
	case req.Event == bittorrent.Stopped:
		// Remove the peer from both swarms; "not found" is not an error here.
		err = h.store.DeleteSeeder(req.InfoHash, req.Peer)
		if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
			return ctx, err
		}

		err = h.store.DeleteLeecher(req.InfoHash, req.Peer)
		if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
			return ctx, err
		}
	case req.Event == bittorrent.Completed:
		err = h.store.GraduateLeecher(req.InfoHash, req.Peer)
		return ctx, err
	case req.Left == 0:
		// Completed events will also have Left == 0, but by making this
		// an extra case we can treat "old" seeders differently from
		// graduating leechers. (Calling PutSeeder is probably faster
		// than calling GraduateLeecher.)
		err = h.store.PutSeeder(req.InfoHash, req.Peer)
		return ctx, err
	default:
		err = h.store.PutLeecher(req.InfoHash, req.Peer)
		return ctx, err
	}

	return ctx, nil
}
// HandleScrape is a pass-through: scrapes never modify the peer store.
func (h *swarmInteractionHook) HandleScrape(ctx context.Context, _ *bittorrent.ScrapeRequest, _ *bittorrent.ScrapeResponse) (context.Context, error) {
	// Scrapes have no effect on the swarm.
	return ctx, nil
}
type skipResponseHook struct{}
// SkipResponseHookKey is a key for the context of an Announce or Scrape to
// control whether the response middleware should run.
// Any non-nil value set for this key will cause the response middleware to
// skip.
var SkipResponseHookKey = skipResponseHook{}
type scrapeAddressType struct{}
// ScrapeIsIPv6Key is the key under which to store whether or not the
// address used to request a scrape was an IPv6 address.
// The value is expected to be of type bool.
// A missing value or a value that is not a bool for this key is equivalent to
// it being set to false.
var ScrapeIsIPv6Key = scrapeAddressType{}
type responseHook struct {
store storage.PeerStore
}
// HandleAnnounce populates resp with swarm counters and a peer list from the
// store. It can be disabled per-request via SkipResponseHookKey.
func (h *responseHook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (_ context.Context, err error) {
	if ctx.Value(SkipResponseHookKey) != nil {
		return ctx, nil
	}

	// Add the Scrape data to the response.
	s := h.store.ScrapeSwarm(req.InfoHash, req.IP.AddressFamily)
	resp.Incomplete += s.Incomplete
	resp.Complete += s.Complete

	err = h.appendPeers(req, resp)
	return ctx, err
}
// appendPeers asks the store for up to req.NumWant peers for this infohash
// and appends them to the response list matching the requester's address
// family. A requester alone in the swarm gets itself back, so clients always
// receive at least one peer.
func (h *responseHook) appendPeers(req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error {
	// Seeders want leechers and vice versa; the store uses this flag.
	seeding := req.Left == 0
	peers, err := h.store.AnnouncePeers(req.InfoHash, seeding, int(req.NumWant), req.Peer)
	if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
		return err
	}

	// Some clients expect a minimum of their own peer representation returned to
	// them if they are the only peer in a swarm.
	if len(peers) == 0 {
		if seeding {
			resp.Complete++
		} else {
			resp.Incomplete++
		}
		peers = append(peers, req.Peer)
	}

	switch req.IP.AddressFamily {
	case bittorrent.IPv4:
		resp.IPv4Peers = append(resp.IPv4Peers, peers...)
	case bittorrent.IPv6:
		resp.IPv6Peers = append(resp.IPv6Peers, peers...)
	default:
		panic("attempted to append peer that is neither IPv4 nor IPv6")
	}

	return nil
}
// HandleScrape fills resp.Files with swarm statistics for every requested
// infohash, unless the response hook is disabled via SkipResponseHookKey.
func (h *responseHook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
	if ctx.Value(SkipResponseHookKey) != nil {
		return ctx, nil
	}

	for _, ih := range req.InfoHashes {
		resp.Files = append(resp.Files, h.store.ScrapeSwarm(ih, req.AddressFamily))
	}
	return ctx, nil
}

View file

@ -1,251 +0,0 @@
// Package jwt implements a Hook that fails an Announce if the client's request
// is missing a valid JSON Web Token.
//
// JWTs are validated against the standard claims in RFC7519 along with an
// extra "infohash" claim that verifies the client has access to the Swarm.
// RS256 keys are asychronously rotated from a provided JWK Set HTTP endpoint.
package jwt
import (
"context"
"crypto"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"net/http"
"strings"
"time"
jc "github.com/SermoDigital/jose/crypto"
"github.com/SermoDigital/jose/jws"
"github.com/SermoDigital/jose/jwt"
"github.com/mendsley/gojwk"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop"
)
// Name is the name by which this middleware is registered with Chihaya.
const Name = "jwt"
func init() {
middleware.RegisterDriver(Name, driver{})
}
var _ middleware.Driver = driver{}
type driver struct{}
// NewHook builds a JWT hook from YAML-encoded middleware options.
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
	var cfg Config
	if err := yaml.Unmarshal(optionBytes, &cfg); err != nil {
		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
	}
	return NewHook(cfg)
}
var (
// ErrMissingJWT is returned when a JWT is missing from a request.
ErrMissingJWT = bittorrent.ClientError("unapproved request: missing jwt")
// ErrInvalidJWT is returned when a JWT fails to verify.
ErrInvalidJWT = bittorrent.ClientError("unapproved request: invalid jwt")
)
// Config represents all the values required by this middleware to fetch JWKs
// and verify JWTs.
//
// JWKSetURL is fetched once at construction and then every
// JWKUpdateInterval in the background.
type Config struct {
	Issuer            string        `yaml:"issuer"`
	Audience          string        `yaml:"audience"`
	JWKSetURL         string        `yaml:"jwk_set_url"`
	JWKUpdateInterval time.Duration `yaml:"jwk_set_update_interval"`
}
// LogFields implements log.Fielder for a Config.
func (cfg Config) LogFields() log.Fields {
	fields := log.Fields{}
	fields["issuer"] = cfg.Issuer
	fields["audience"] = cfg.Audience
	fields["JWKSetURL"] = cfg.JWKSetURL
	fields["JWKUpdateInterval"] = cfg.JWKUpdateInterval
	return fields
}
// hook carries the JWT middleware state.
// publicKeys maps JWK "kid" values to decoded public keys; closing stops the
// background refresh goroutine.
// NOTE(review): publicKeys is swapped by updateKeys without synchronization
// while HandleAnnounce reads it — confirm whether this race matters here.
type hook struct {
	cfg        Config
	publicKeys map[string]crypto.PublicKey
	closing    chan struct{}
}
// NewHook returns an instance of the JWT middleware.
//
// It performs a blocking initial fetch of the JWK Set (construction fails if
// that fetch fails), then starts a background goroutine that refreshes the
// keys every cfg.JWKUpdateInterval until Stop closes h.closing.
func NewHook(cfg Config) (middleware.Hook, error) {
	log.Debug("creating new JWT middleware", cfg)
	h := &hook{
		cfg:        cfg,
		publicKeys: map[string]crypto.PublicKey{},
		closing:    make(chan struct{}),
	}

	log.Debug("performing initial fetch of JWKs")
	if err := h.updateKeys(); err != nil {
		return nil, errors.New("failed to fetch initial JWK Set: " + err.Error())
	}

	go func() {
		for {
			select {
			case <-h.closing:
				return
			case <-time.After(cfg.JWKUpdateInterval):
				log.Debug("performing fetch of JWKs")
				// Refresh errors are logged inside updateKeys and otherwise
				// ignored; the previous key set stays in use.
				_ = h.updateKeys()
			}
		}
	}()
	return h, nil
}
// updateKeys fetches the configured JWK Set over HTTP, decodes every key in
// it, and replaces the hook's key cache. On any failure the existing cache
// is left untouched and the error is logged and returned.
func (h *hook) updateKeys() error {
	resp, err := http.Get(h.cfg.JWKSetURL)
	if err != nil {
		log.Error("failed to fetch JWK Set", log.Err(err))
		return err
	}
	defer resp.Body.Close()

	// Bug fix: a non-2xx response (auth failure, missing endpoint, ...) was
	// previously decoded as if it were a JWK Set; fail fast instead.
	if resp.StatusCode < 200 || resp.StatusCode >= 300 {
		err := errors.New("unexpected status fetching JWK Set: " + resp.Status)
		log.Error("failed to fetch JWK Set", log.Err(err))
		return err
	}

	var parsedJWKs gojwk.Key
	err = json.NewDecoder(resp.Body).Decode(&parsedJWKs)
	if err != nil {
		log.Error("failed to decode JWK JSON", log.Err(err))
		return err
	}

	keys := map[string]crypto.PublicKey{}
	for _, parsedJWK := range parsedJWKs.Keys {
		publicKey, err := parsedJWK.DecodePublicKey()
		if err != nil {
			log.Error("failed to decode JWK into public key", log.Err(err))
			return err
		}
		keys[parsedJWK.Kid] = publicKey
	}

	// NOTE(review): this map swap is not synchronized with concurrent readers
	// in HandleAnnounce and is racy under the Go memory model — confirm and
	// guard with a mutex or atomic.Value if it matters in practice.
	h.publicKeys = keys

	log.Debug("successfully fetched JWK Set")
	return nil
}
// Stop implements stop.Stopper, shutting down the background JWK refresh
// goroutine by closing h.closing.
//
// NOTE(review): two concurrent Stop calls can both pass the select below and
// double-close the channel (panic); sequential repeated calls are safe.
func (h *hook) Stop() stop.Result {
	log.Debug("attempting to shutdown JWT middleware")

	// A closed channel means a previous Stop already completed.
	select {
	case <-h.closing:
		return stop.AlreadyStopped
	default:
	}
	c := make(stop.Channel)
	go func() {
		close(h.closing)
		c.Done()
	}()
	return c.Result()
}
// HandleAnnounce rejects any announce that does not carry a JWT verifiable
// against the configured issuer, audience, and the request's infohash.
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
	if req.Params == nil {
		return ctx, ErrMissingJWT
	}

	token, ok := req.Params.String("jwt")
	if !ok {
		return ctx, ErrMissingJWT
	}

	if validateJWT(req.InfoHash, []byte(token), h.cfg.Issuer, h.cfg.Audience, h.publicKeys) != nil {
		return ctx, ErrInvalidJWT
	}
	return ctx, nil
}
// HandleScrape is a pass-through: scrape requests do not require a JWT.
func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
	// Scrapes don't require any protection.
	return ctx, nil
}
// validateJWT checks a JWT's issuer, audience, and "infohash" claims against
// the configuration and the announced infohash, then verifies its RS256
// signature with the public key matching the token's "kid" header.
func validateJWT(ih bittorrent.InfoHash, jwtBytes []byte, cfgIss, cfgAud string, publicKeys map[string]crypto.PublicKey) error {
	parsedJWT, err := jws.ParseJWT(jwtBytes)
	if err != nil {
		return err
	}

	claims := parsedJWT.Claims()

	if iss, ok := claims.Issuer(); !ok || iss != cfgIss {
		log.Debug("unequal or missing issuer when validating JWT", log.Fields{
			"exists": ok,
			"claim":  iss,
			"config": cfgIss,
		})
		return jwt.ErrInvalidISSClaim
	}

	if auds, ok := claims.Audience(); !ok || !in(cfgAud, auds) {
		log.Debug("unequal or missing audience when validating JWT", log.Fields{
			"exists": ok,
			"claim":  strings.Join(auds, ","),
			"config": cfgAud,
		})
		return jwt.ErrInvalidAUDClaim
	}

	// The custom "infohash" claim binds the token to one specific swarm.
	ihHex := hex.EncodeToString(ih[:])
	if ihClaim, ok := claims.Get("infohash").(string); !ok || ihClaim != ihHex {
		log.Debug("unequal or missing infohash when validating JWT", log.Fields{
			"exists":  ok,
			"claim":   ihClaim,
			"request": ihHex,
		})
		return errors.New("claim \"infohash\" is invalid")
	}

	parsedJWS := parsedJWT.(jws.JWS)
	kid, ok := parsedJWS.Protected().Get("kid").(string)
	if !ok {
		log.Debug("missing kid when validating JWT", log.Fields{
			"exists": ok,
			"claim":  kid,
		})
		return errors.New("invalid kid")
	}

	publicKey, ok := publicKeys[kid]
	if !ok {
		// Bug fix: this log message previously read "forkid" (missing space).
		log.Debug("missing public key for kid when validating JWT", log.Fields{
			"kid": kid,
		})
		return errors.New("signed by unknown kid")
	}

	// Only RS256 signatures are accepted.
	err = parsedJWS.Verify(publicKey, jc.SigningMethodRS256)
	if err != nil {
		log.Debug("failed to verify signature of JWT", log.Err(err))
		return err
	}
	return nil
}
// in reports whether x is an element of xs.
func in(x string, xs []string) bool {
	for _, candidate := range xs {
		if candidate == x {
			return true
		}
	}
	return false
}

View file

@ -1,125 +0,0 @@
package middleware
import (
"context"
"time"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/frontend"
"github.com/chihaya/chihaya/pkg/log"
"github.com/chihaya/chihaya/pkg/stop"
"github.com/chihaya/chihaya/storage"
)
// ResponseConfig holds the configuration used for the actual response.
//
// TODO(jzelinskie): Evaluate whether we would like to make this optional.
// We can make Chihaya extensible enough that you can program a new response
// generator at the cost of making it possible for users to create config that
// won't compose a functional tracker.
type ResponseConfig struct {
AnnounceInterval time.Duration `yaml:"announce_interval"`
MinAnnounceInterval time.Duration `yaml:"min_announce_interval"`
}
var _ frontend.TrackerLogic = &Logic{}
// NewLogic creates a new instance of a TrackerLogic that executes the provided
// middleware hooks.
//
// The store-backed responseHook is appended after the caller's pre-hooks (so
// responses are populated last) and the swarmInteractionHook after the
// caller's post-hooks (so swarm state is updated after the response phase).
func NewLogic(cfg ResponseConfig, peerStore storage.PeerStore, preHooks, postHooks []Hook) *Logic {
	return &Logic{
		announceInterval:    cfg.AnnounceInterval,
		minAnnounceInterval: cfg.MinAnnounceInterval,
		peerStore:           peerStore,
		preHooks:            append(preHooks, &responseHook{store: peerStore}),
		postHooks:           append(postHooks, &swarmInteractionHook{store: peerStore}),
	}
}
// Logic is an implementation of the TrackerLogic that functions by
// executing a series of middleware hooks.
//
// preHooks run during request handling and may abort it; postHooks run after
// the response has been generated and only log their failures.
type Logic struct {
	announceInterval    time.Duration
	minAnnounceInterval time.Duration
	peerStore           storage.PeerStore
	preHooks            []Hook
	postHooks           []Hook
}
// HandleAnnounce generates a response for an Announce.
func (l *Logic) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (_ context.Context, resp *bittorrent.AnnounceResponse, err error) {
	resp = &bittorrent.AnnounceResponse{
		Interval:    l.announceInterval,
		MinInterval: l.minAnnounceInterval,
		Compact:     req.Compact,
	}

	// Run every pre-hook in order; the first failure aborts the announce.
	for _, hook := range l.preHooks {
		ctx, err = hook.HandleAnnounce(ctx, req, resp)
		if err != nil {
			return nil, nil, err
		}
	}

	log.Debug("generated announce response", resp)
	return ctx, resp, nil
}
// AfterAnnounce does something with the results of an Announce after it has
// been completed. A failing post-hook is logged and stops further hooks.
func (l *Logic) AfterAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) {
	for _, hook := range l.postHooks {
		nctx, err := hook.HandleAnnounce(ctx, req, resp)
		if err != nil {
			log.Error("post-announce hooks failed", log.Err(err))
			return
		}
		ctx = nctx
	}
}
// HandleScrape generates a response for a Scrape.
func (l *Logic) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest) (_ context.Context, resp *bittorrent.ScrapeResponse, err error) {
	resp = &bittorrent.ScrapeResponse{
		Files: make([]bittorrent.Scrape, 0, len(req.InfoHashes)),
	}

	// Run every pre-hook in order; the first failure aborts the scrape.
	for _, hook := range l.preHooks {
		ctx, err = hook.HandleScrape(ctx, req, resp)
		if err != nil {
			return nil, nil, err
		}
	}

	log.Debug("generated scrape response", resp)
	return ctx, resp, nil
}
// AfterScrape does something with the results of a Scrape after it has been
// completed. A failing post-hook is logged and stops further hooks.
func (l *Logic) AfterScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) {
	for _, hook := range l.postHooks {
		nctx, err := hook.HandleScrape(ctx, req, resp)
		if err != nil {
			log.Error("post-scrape hooks failed", log.Err(err))
			return
		}
		ctx = nctx
	}
}
// Stop stops the Logic.
//
// This stops any hooks that implement stop.Stopper.
// The previously-duplicated pre/post scan loops are collapsed into one.
func (l *Logic) Stop() stop.Result {
	stopGroup := stop.NewGroup()
	for _, hooks := range [][]Hook{l.preHooks, l.postHooks} {
		for _, hook := range hooks {
			if stoppable, ok := hook.(stop.Stopper); ok {
				stopGroup.Add(stoppable)
			}
		}
	}

	return stopGroup.Stop()
}

View file

@ -1,83 +0,0 @@
package middleware
import (
"context"
"fmt"
"net"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
// nopHook is a Hook to measure the overhead of a no-operation Hook through
// benchmarks.
type nopHook struct{}
// HandleAnnounce is a no-op: it returns the context unchanged.
func (h *nopHook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
	return ctx, nil
}
// HandleScrape is a no-op: it returns the context unchanged.
func (h *nopHook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
	return ctx, nil
}
type hookList []Hook
// handleAnnounce runs an announce through every hook in order, using fixed
// response defaults, and returns the populated response.
func (hooks hookList) handleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest) (resp *bittorrent.AnnounceResponse, err error) {
	resp = &bittorrent.AnnounceResponse{
		Interval:    60,
		MinInterval: 60,
		Compact:     true,
	}

	for _, hook := range hooks {
		if ctx, err = hook.HandleAnnounce(ctx, req, resp); err != nil {
			return nil, err
		}
	}
	return resp, nil
}
// benchHookListV4 benchmarks hooks with an announce from an IPv4 peer.
func benchHookListV4(b *testing.B, hooks hookList) {
	req := &bittorrent.AnnounceRequest{Peer: bittorrent.Peer{IP: bittorrent.IP{IP: net.ParseIP("1.2.3.4"), AddressFamily: bittorrent.IPv4}}}
	benchHookList(b, hooks, req)
}
// benchHookListV6 benchmarks hooks with an announce from an IPv6 peer.
func benchHookListV6(b *testing.B, hooks hookList) {
	req := &bittorrent.AnnounceRequest{Peer: bittorrent.Peer{IP: bittorrent.IP{IP: net.ParseIP("fc00::0001"), AddressFamily: bittorrent.IPv6}}}
	benchHookList(b, hooks, req)
}
// benchHookList measures the per-announce cost of running the given hook
// chain against a fixed request.
func benchHookList(b *testing.B, hooks hookList, req *bittorrent.AnnounceRequest) {
	ctx := context.Background()
	b.ResetTimer()
	for n := 0; n < b.N; n++ {
		resp, err := hooks.handleAnnounce(ctx, req)
		require.Nil(b, err)
		require.NotNil(b, resp)
	}
}
// BenchmarkHookOverhead measures announce latency through chains of zero to
// three no-op hooks, for both IPv4 and IPv6 peers.
func BenchmarkHookOverhead(b *testing.B) {
	b.Run("none-v4", func(b *testing.B) {
		benchHookListV4(b, hookList{})
	})
	b.Run("none-v6", func(b *testing.B) {
		benchHookListV6(b, hookList{})
	})
	// nopHooks grows by one hook per loop iteration, so the subtests run
	// with chains of length 1, 2, and 3.
	var nopHooks hookList
	for i := 1; i < 4; i++ {
		nopHooks = append(nopHooks, &nopHook{})
		b.Run(fmt.Sprintf("%dnop-v4", i), func(b *testing.B) {
			benchHookListV4(b, nopHooks)
		})
		b.Run(fmt.Sprintf("%dnop-v6", i), func(b *testing.B) {
			benchHookListV6(b, nopHooks)
		})
	}
}

View file

@ -1,94 +0,0 @@
// Package middleware implements the TrackerLogic interface by executing
// a series of middleware hooks.
package middleware
import (
"errors"
"sync"
yaml "gopkg.in/yaml.v2"
)
var (
driversM sync.RWMutex
drivers = make(map[string]Driver)
// ErrDriverDoesNotExist is the error returned by NewMiddleware when a
// middleware driver with that name does not exist.
ErrDriverDoesNotExist = errors.New("middleware driver with that name does not exist")
)
// Driver is the interface used to initialize a new type of middleware.
//
// The options parameter is YAML encoded bytes that should be unmarshalled into
// the hook's custom configuration.
type Driver interface {
NewHook(options []byte) (Hook, error)
}
// RegisterDriver makes a Driver available by the provided name.
//
// If called twice with the same name, the name is blank, or if the provided
// Driver is nil, this function panics.
func RegisterDriver(name string, d Driver) {
	switch {
	case name == "":
		panic("middleware: could not register a Driver with an empty name")
	case d == nil:
		panic("middleware: could not register a nil Driver")
	}

	driversM.Lock()
	defer driversM.Unlock()

	if _, dup := drivers[name]; dup {
		panic("middleware: RegisterDriver called twice for " + name)
	}
	drivers[name] = d
}
// New attempts to initialize a new middleware instance from the
// list of registered Drivers.
//
// If a driver does not exist, returns ErrDriverDoesNotExist.
func New(name string, optionBytes []byte) (Hook, error) {
	driversM.RLock()
	defer driversM.RUnlock()

	if d, ok := drivers[name]; ok {
		return d.NewHook(optionBytes)
	}
	return nil, ErrDriverDoesNotExist
}
// HookConfig is the generic configuration format used for all registered Hooks.
type HookConfig struct {
// Name selects which registered Driver constructs the Hook.
Name string `yaml:"name"`
// Options holds driver-specific configuration; it is re-marshalled to
// YAML before being handed to the driver's NewHook.
Options map[string]interface{} `yaml:"options"`
}
// HooksFromHookConfigs is a utility function for initializing Hooks in bulk.
//
// On failure it returns the hooks built so far together with the error.
func HooksFromHookConfigs(cfgs []HookConfig) ([]Hook, error) {
	var hooks []Hook
	for _, cfg := range cfgs {
		// Marshal the options back into bytes so each driver can
		// unmarshal them into its own configuration struct.
		optionBytes, err := yaml.Marshal(cfg.Options)
		if err != nil {
			return hooks, err
		}

		h, err := New(cfg.Name, optionBytes)
		if err != nil {
			return hooks, err
		}
		hooks = append(hooks, h)
	}
	return hooks, nil
}

View file

@ -1,17 +0,0 @@
package random
import (
"encoding/binary"
"github.com/chihaya/chihaya/bittorrent"
)
// DeriveEntropyFromRequest generates 2*64 bits of pseudo random state from an
// AnnounceRequest.
//
// Calling DeriveEntropyFromRequest multiple times yields the same values.
func DeriveEntropyFromRequest(req *bittorrent.AnnounceRequest) (uint64, uint64) {
// Fold the first 16 bytes of the infohash and of the peer ID into one
// uint64 each by summing their two big-endian 8-byte words.
v0 := binary.BigEndian.Uint64(req.InfoHash[:8]) + binary.BigEndian.Uint64(req.InfoHash[8:16])
v1 := binary.BigEndian.Uint64(req.Peer.ID[:8]) + binary.BigEndian.Uint64(req.Peer.ID[8:16])
return v0, v1
}

View file

@ -1,28 +0,0 @@
// Package random implements the XORShift PRNG and a way to derive random state
// from an AnnounceRequest.
package random
// GenerateAndAdvance applies XORShift128Plus on s0 and s1, returning
// the new states newS0, newS1 and a pseudo-random number v.
func GenerateAndAdvance(s0, s1 uint64) (v, newS0, newS1 uint64) {
	// The output is the sum of the two state words, taken before the
	// state is advanced.
	v = s0 + s1

	// Advance the state: s1 becomes the new s0, and the new s1 mixes
	// both words using the (23, 18, 5) xorshift shift triple.
	x := s0 ^ (s0 << 23)
	newS0 = s1
	newS1 = x ^ s1 ^ (x >> 18) ^ (s1 >> 5)
	return v, newS0, newS1
}
// Intn generates an int k that satisfies k >= 0 && k < n.
// n must be > 0; Intn panics otherwise.
// It returns the generated k and the new state of the generator.
func Intn(s0, s1 uint64, n int) (int, uint64, uint64) {
	if n <= 0 {
		panic("invalid n <= 0")
	}
	v, newS0, newS1 := GenerateAndAdvance(s0, s1)

	// Clear the sign bit instead of negating: when v == 1<<63, int(v) is
	// math.MinInt64, whose negation is still MinInt64, and k % n would
	// then be negative — violating the documented k >= 0 contract.
	k := int(v & (1<<63 - 1))
	return k % n, newS0, newS1
}

View file

@ -1,38 +0,0 @@
package random
import (
"math/rand"
"testing"
"time"
"github.com/stretchr/testify/require"
)
// TestIntn checks that Intn stays within [0, 10) over many iterations,
// starting from randomly seeded generator state.
func TestIntn(t *testing.T) {
rand.Seed(time.Now().UnixNano())
s0, s1 := rand.Uint64(), rand.Uint64()
var k int
for i := 0; i < 10000; i++ {
// Thread the state back through each call, as real callers do.
k, s0, s1 = Intn(s0, s1, 10)
require.True(t, k >= 0, "Intn() must be >= 0")
require.True(t, k < 10, "Intn(k) must be < k")
}
}
// BenchmarkAdvanceXORShift128Plus measures the raw cost of one generator step.
func BenchmarkAdvanceXORShift128Plus(b *testing.B) {
s0, s1 := rand.Uint64(), rand.Uint64()
var v uint64
for i := 0; i < b.N; i++ {
v, s0, s1 = GenerateAndAdvance(s0, s1)
}
// Keep the results live so the compiler cannot elide the loop.
_, _, _ = v, s0, s1
}
// BenchmarkIntn measures the cost of bounded generation with n = 1000.
func BenchmarkIntn(b *testing.B) {
s0, s1 := rand.Uint64(), rand.Uint64()
var v int
for i := 0; i < b.N; i++ {
v, s0, s1 = Intn(s0, s1, 1000)
}
// Keep the results live so the compiler cannot elide the loop.
_, _, _ = v, s0, s1
}

View file

@ -1,109 +0,0 @@
// Package torrentapproval implements a Hook that fails an Announce based on a
// whitelist or blacklist of torrent hash.
package torrentapproval
import (
"context"
"encoding/hex"
"fmt"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
)
// Name is the name by which this middleware is registered with Chihaya.
const Name = "torrent approval"
// init registers this package's driver under Name at package load time.
func init() {
middleware.RegisterDriver(Name, driver{})
}
// Compile-time check that driver satisfies middleware.Driver.
var _ middleware.Driver = driver{}
// driver constructs the torrent-approval hook from YAML options.
type driver struct{}
// NewHook decodes the YAML-encoded options into a Config and builds the hook.
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
	var cfg Config
	if err := yaml.Unmarshal(optionBytes, &cfg); err != nil {
		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
	}
	return NewHook(cfg)
}
// ErrTorrentUnapproved is the error returned when an announce's infohash is
// rejected by the configured whitelist/blacklist policy.
var ErrTorrentUnapproved = bittorrent.ClientError("unapproved torrent")
// Config represents all the values required by this middleware to validate
// torrents based on their hash value.
type Config struct {
// Whitelist lists hex-encoded 20-byte infohashes that are allowed;
// mutually exclusive with Blacklist.
Whitelist []string `yaml:"whitelist"`
// Blacklist lists hex-encoded 20-byte infohashes that are rejected;
// mutually exclusive with Whitelist.
Blacklist []string `yaml:"blacklist"`
}
type hook struct {
approved map[bittorrent.InfoHash]struct{}
unapproved map[bittorrent.InfoHash]struct{}
}
// NewHook returns an instance of the torrent approval middleware.
//
// At most one of cfg.Whitelist and cfg.Blacklist may be non-empty; supplying
// both is an error. Each entry must be a hex string decoding to exactly 20
// bytes (a BitTorrent infohash).
func NewHook(cfg Config) (middleware.Hook, error) {
	if len(cfg.Whitelist) > 0 && len(cfg.Blacklist) > 0 {
		return nil, fmt.Errorf("using both whitelist and blacklist is invalid")
	}

	h := &hook{
		approved:   make(map[bittorrent.InfoHash]struct{}),
		unapproved: make(map[bittorrent.InfoHash]struct{}),
	}

	if err := parseHashList(cfg.Whitelist, "whitelist", h.approved); err != nil {
		return nil, err
	}
	if err := parseHashList(cfg.Blacklist, "blacklist", h.unapproved); err != nil {
		return nil, err
	}
	return h, nil
}

// parseHashList decodes every hex-encoded infohash in hashes into dest,
// labelling validation errors with listName.
func parseHashList(hashes []string, listName string, dest map[bittorrent.InfoHash]struct{}) error {
	for _, hashString := range hashes {
		hashinfo, err := hex.DecodeString(hashString)
		if err != nil {
			return fmt.Errorf("%s : invalid hash %s", listName, hashString)
		}
		// An infohash is a SHA-1 digest and must be exactly 20 bytes.
		if len(hashinfo) != 20 {
			return fmt.Errorf("%s : hash %s is not 20 bytes", listName, hashString)
		}
		dest[bittorrent.InfoHashFromBytes(hashinfo)] = struct{}{}
	}
	return nil
}
// HandleAnnounce rejects announces whose infohash is not permitted by the
// configured whitelist or blacklist; permitted requests pass through.
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
	ih := req.InfoHash

	// Whitelist mode: the infohash must be present in the approved set.
	if len(h.approved) > 0 {
		if _, ok := h.approved[ih]; !ok {
			return ctx, ErrTorrentUnapproved
		}
	}

	// Blacklist mode: the infohash must be absent from the unapproved set.
	if len(h.unapproved) > 0 {
		if _, ok := h.unapproved[ih]; ok {
			return ctx, ErrTorrentUnapproved
		}
	}

	return ctx, nil
}
// HandleScrape passes every scrape through unchanged.
func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
// Scrapes don't require any protection.
return ctx, nil
}

View file

@ -1,79 +0,0 @@
package torrentapproval
import (
"context"
"encoding/hex"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
// cases pairs a whitelist/blacklist Config with an infohash and whether that
// infohash should be permitted to announce.
var cases = []struct {
cfg Config
ih string
approved bool
}{
// Infohash is whitelisted
{
Config{
Whitelist: []string{"3532cf2d327fad8448c075b4cb42c8136964a435"},
},
"3532cf2d327fad8448c075b4cb42c8136964a435",
true,
},
// Infohash is not whitelisted
{
Config{
Whitelist: []string{"3532cf2d327fad8448c075b4cb42c8136964a435"},
},
"4532cf2d327fad8448c075b4cb42c8136964a435",
false,
},
// Infohash is not blacklisted
{
Config{
Blacklist: []string{"3532cf2d327fad8448c075b4cb42c8136964a435"},
},
"4532cf2d327fad8448c075b4cb42c8136964a435",
true,
},
// Infohash is blacklisted
{
Config{
Blacklist: []string{"3532cf2d327fad8448c075b4cb42c8136964a435"},
},
"3532cf2d327fad8448c075b4cb42c8136964a435",
false,
},
}
// TestHandleAnnounce verifies that HandleAnnounce accepts or rejects each
// infohash in cases according to its configured whitelist/blacklist.
func TestHandleAnnounce(t *testing.T) {
for _, tt := range cases {
t.Run(fmt.Sprintf("testing hash %s", tt.ih), func(t *testing.T) {
h, err := NewHook(tt.cfg)
require.Nil(t, err)
ctx := context.Background()
req := &bittorrent.AnnounceRequest{}
resp := &bittorrent.AnnounceResponse{}
hashbytes, err := hex.DecodeString(tt.ih)
require.Nil(t, err)
hashinfo := bittorrent.InfoHashFromBytes(hashbytes)
req.InfoHash = hashinfo
nctx, err := h.HandleAnnounce(ctx, req, resp)
// The hook must pass the context through untouched.
require.Equal(t, ctx, nctx)
if tt.approved == true {
require.NotEqual(t, err, ErrTorrentUnapproved)
} else {
require.Equal(t, err, ErrTorrentUnapproved)
}
})
}
}

View file

@ -1,115 +0,0 @@
package varinterval
import (
"context"
"errors"
"fmt"
"sync"
"time"
yaml "gopkg.in/yaml.v2"
"github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/middleware"
"github.com/chihaya/chihaya/middleware/pkg/random"
)
// Name is the name by which this middleware is registered with Chihaya.
const Name = "interval variation"
// init registers this package's driver under Name at package load time.
func init() {
middleware.RegisterDriver(Name, driver{})
}
// Compile-time check that driver satisfies middleware.Driver.
var _ middleware.Driver = driver{}
// driver constructs the varinterval hook from YAML options.
type driver struct{}
// NewHook decodes the YAML-encoded options into a Config and builds the hook.
func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
	var cfg Config
	if err := yaml.Unmarshal(optionBytes, &cfg); err != nil {
		return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
	}
	return NewHook(cfg)
}
// ErrInvalidModifyResponseProbability is returned for a config with an invalid
// ModifyResponseProbability (outside the half-open interval (0, 1]).
var ErrInvalidModifyResponseProbability = errors.New("invalid modify_response_probability")
// ErrInvalidMaxIncreaseDelta is returned for a config with an invalid
// (non-positive) MaxIncreaseDelta.
var ErrInvalidMaxIncreaseDelta = errors.New("invalid max_increase_delta")
// Config represents the configuration for the varinterval middleware.
type Config struct {
// ModifyResponseProbability is the probability by which a response will
// be modified. Must be in (0, 1]; see checkConfig.
ModifyResponseProbability float32 `yaml:"modify_response_probability"`
// MaxIncreaseDelta is the amount of seconds that will be added at most.
// Must be positive; see checkConfig.
MaxIncreaseDelta int `yaml:"max_increase_delta"`
// ModifyMinInterval specifies whether min_interval should be increased
// as well.
ModifyMinInterval bool `yaml:"modify_min_interval"`
}
// checkConfig validates cfg, returning a descriptive error for the first
// field found outside its allowed range, or nil if cfg is valid.
func checkConfig(cfg Config) error {
	// The probability must lie in (0, 1]: zero or negative would never
	// fire, and values above one are meaningless.
	if p := cfg.ModifyResponseProbability; p <= 0 || p > 1 {
		return ErrInvalidModifyResponseProbability
	}
	// A non-positive delta could never increase the interval.
	if cfg.MaxIncreaseDelta <= 0 {
		return ErrInvalidMaxIncreaseDelta
	}
	return nil
}
// hook implements the varinterval middleware, holding its validated config.
type hook struct {
cfg Config
// NOTE(review): this embedded Mutex is never locked anywhere in this
// file — confirm whether any external caller relies on it before removing.
sync.Mutex
}
// NewHook creates a middleware to randomly modify the announce interval from
// the given config.
//
// It returns an error from checkConfig if cfg is invalid.
func NewHook(cfg Config) (middleware.Hook, error) {
	if err := checkConfig(cfg); err != nil {
		return nil, err
	}
	return &hook{cfg: cfg}, nil
}
// HandleAnnounce possibly inflates resp.Interval (and resp.MinInterval when
// configured) by a pseudo-random number of seconds deterministically derived
// from the request.
func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) (context.Context, error) {
	s0, s1 := random.DeriveEntropyFromRequest(req)

	// Generate a probability p < 1.0.
	v, s0, s1 := random.Intn(s0, s1, 1<<24)
	p := float32(v) / (1 << 24)

	// The explicit == 1 check keeps a probability of exactly 1 always firing.
	shouldModify := h.cfg.ModifyResponseProbability == 1 || p < h.cfg.ModifyResponseProbability
	if shouldModify {
		// Generate the increase delta, in [1, MaxIncreaseDelta] seconds.
		v, _, _ = random.Intn(s0, s1, h.cfg.MaxIncreaseDelta)
		delta := time.Duration(v+1) * time.Second

		resp.Interval += delta
		if h.cfg.ModifyMinInterval {
			resp.MinInterval += delta
		}
	}

	return ctx, nil
}
// HandleScrape passes every scrape through unchanged.
func (h *hook) HandleScrape(ctx context.Context, req *bittorrent.ScrapeRequest, resp *bittorrent.ScrapeResponse) (context.Context, error) {
// Scrapes are not altered.
return ctx, nil
}

View file

@ -1,61 +0,0 @@
package varinterval
import (
"context"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/chihaya/chihaya/bittorrent"
)
// configTests enumerates Config values alongside the error checkConfig is
// expected to return for each.
var configTests = []struct {
cfg Config
expected error
}{
// Valid: probability in (0, 1], positive delta.
{
cfg: Config{0.5, 60, true},
expected: nil,
}, {
cfg: Config{1.0, 60, true},
expected: nil,
}, {
// Probability of zero is rejected.
cfg: Config{0.0, 60, true},
expected: ErrInvalidModifyResponseProbability,
}, {
// Probability above one is rejected.
cfg: Config{1.1, 60, true},
expected: ErrInvalidModifyResponseProbability,
}, {
// Zero or negative delta is rejected.
cfg: Config{0.5, 0, true},
expected: ErrInvalidMaxIncreaseDelta,
}, {
cfg: Config{0.5, -10, true},
expected: ErrInvalidMaxIncreaseDelta,
},
}
// TestCheckConfig runs checkConfig over every entry in configTests.
func TestCheckConfig(t *testing.T) {
for _, tt := range configTests {
t.Run(fmt.Sprintf("%#v", tt.cfg), func(t *testing.T) {
got := checkConfig(tt.cfg)
require.Equal(t, tt.expected, got, "", tt.cfg)
})
}
}
// TestHandleAnnounce verifies that with probability 1.0 the hook always
// increases both interval fields of an announce response.
func TestHandleAnnounce(t *testing.T) {
h, err := NewHook(Config{1.0, 10, true})
require.Nil(t, err)
require.NotNil(t, h)
ctx := context.Background()
req := &bittorrent.AnnounceRequest{}
// The zero-valued response starts with both intervals at 0, so any
// increase is observable below.
resp := &bittorrent.AnnounceResponse{}
nCtx, err := h.HandleAnnounce(ctx, req, resp)
require.Nil(t, err)
require.Equal(t, ctx, nCtx)
require.True(t, resp.Interval > 0, "interval should have been increased")
require.True(t, resp.MinInterval > 0, "min_interval should have been increased")
}

Some files were not shown because too many files have changed in this diff Show more