Merge pull request #549 from jzelinskie/fix-vet

.github: split workflows into build/lint, add new linters
Jimmy Zelinskie 2022-01-18 13:49:14 -05:00 committed by GitHub
commit 828edb8fd8
48 changed files with 533 additions and 358 deletions

.github/FUNDING.yml

@@ -1 +1,3 @@
-github: [ jzelinskie ]
+---
+github:
+  - "jzelinskie"


@@ -1,22 +1,23 @@
+---
 version: 2
 updates:
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:
       interval: "monthly"
     labels:
       - "component/dependencies"
   - package-ecosystem: "gomod"
     directory: "/"
     schedule:
       interval: "monthly"
     labels:
       - "component/dependencies"
   - package-ecosystem: "docker"
     directory: "/"
     schedule:
       interval: "monthly"
     labels:
       - "component/dependencies"


@@ -1,115 +0,0 @@
-name: CI
-on:
-  # See the documentation for more intricate event dispatch here:
-  # https://help.github.com/en/actions/reference/workflow-syntax-for-github-actions#on
-  push:
-    branches:
-      - "!dependabot/*"
-      - "*"
-  pull_request:
-    branches:
-      - "*"
-jobs:
-  build:
-    name: Build & Lint
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Setup
-        uses: actions/setup-go@v2
-        with:
-          go-version: ^1.15
-      - name: Build
-        run: go build -v ./cmd/...
-      - name: Vet
-        run: go vet ./...
-      - name: Imports
-        uses: Jerome1337/goimports-action@v1.0.3
-      - name: Format
-        uses: Jerome1337/gofmt-action@v1.0.4
-      - name: Lint
-        uses: Jerome1337/golint-action@v1.0.2
-  unit:
-    name: Unit Tests
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Setup
-        uses: actions/setup-go@v2
-        with:
-          go-version: ^1.15
-      - name: Unit Tests
-        run: go test -v -race $(go list ./...)
-  e2e-mem:
-    name: E2E Tests (Memory Storage)
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Setup
-        uses: actions/setup-go@v2
-        with:
-          go-version: ^1.15
-      - name: End-to-End Test
-        run: |
-          go install ./cmd/chihaya
-          cat ./dist/example_config.yaml
-          chihaya --config=./dist/example_config.yaml --debug &
-          pid=$!
-          sleep 2
-          chihaya e2e --debug
-          kill $pid
-  e2e-redis:
-    name: E2E Tests (Redis Storage)
-    runs-on: ubuntu-latest
-    services:
-      redis:
-        image: redis
-        ports: ["6379:6379"]
-        options: --entrypoint redis-server
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Setup
-        uses: actions/setup-go@v2
-        with:
-          go-version: ^1.15
-      - name: Configure redis storage
-        run: |
-          curl -LO https://github.com/jzelinskie/faq/releases/download/0.0.6/faq-linux-amd64
-          chmod +x faq-linux-amd64
-          ./faq-linux-amd64 '.chihaya.storage = {"config":{"gc_interval":"3m","peer_lifetime":"31m","prometheus_reporting_interval":"1s","redis_broker":"redis://127.0.0.1:6379/0","redis_connect_timeout":"15s","redis_read_timeout":"15s","redis_write_timeout":"15s"},"name":"redis"}' ./dist/example_config.yaml > ./dist/example_redis_config.yaml
-          cat ./dist/example_redis_config.yaml
-      - name: End-to-End Test
-        run: |
-          go install ./cmd/chihaya
-          chihaya --config=./dist/example_redis_config.yaml --debug &
-          pid=$!
-          sleep 2
-          chihaya e2e --debug
-          kill $pid
-  dist:
-    name: Helm Template
-    runs-on: ubuntu-latest
-    steps:
-      - name: Checkout
-        uses: actions/checkout@v2
-      - name: Install Helm
-        uses: engineerd/configurator@v0.0.5
-        with:
-          name: helm
-          pathInArchive: linux-amd64/helm
-          fromGitHubReleases: true
-          repo: helm/helm
-          version: ^v3
-          urlTemplate: https://get.helm.sh/helm-{{version}}-linux-amd64.tar.gz
-          token: ${{ secrets.GITHUB_TOKEN }}
-      - name: Helm Template
-        working-directory: ./dist/helm/chihaya
-        run: helm template . --debug

.github/workflows/build.yaml (new file)

@@ -0,0 +1,112 @@
+---
+name: "Build & Test"
+on:
+  push:
+    branches:
+      - "!dependabot/*"
+      - "main"
+  pull_request:
+    branches: ["*"]
+jobs:
+  build:
+    name: "Go Build"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - name: "Build"
+        run: "go build ./cmd/..."
+  unit:
+    name: "Run Unit Tests"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - name: "Run `go test`"
+        run: "go test -race ./..."
+  e2e-mem:
+    name: "E2E Memory Tests"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - name: "Install and configure chihaya"
+        run: |
+          go install ./cmd/chihaya
+          cat ./dist/example_config.yaml
+      - name: "Run end-to-end tests"
+        run: |
+          chihaya --config=./dist/example_config.yaml --debug &
+          pid=$!
+          sleep 2
+          chihaya e2e --debug
+          kill $pid
+  e2e-redis:
+    name: "E2E Redis Tests"
+    runs-on: "ubuntu-latest"
+    services:
+      redis:
+        image: "redis"
+        ports: ["6379:6379"]
+        options: "--entrypoint redis-server"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - name: "Install and configure chihaya"
+        run: |
+          go install ./cmd/chihaya
+          curl -LO https://github.com/jzelinskie/faq/releases/download/0.0.6/faq-linux-amd64
+          chmod +x faq-linux-amd64
+          ./faq-linux-amd64 '.chihaya.storage = {"config":{"gc_interval":"3m","peer_lifetime":"31m","prometheus_reporting_interval":"1s","redis_broker":"redis://127.0.0.1:6379/0","redis_connect_timeout":"15s","redis_read_timeout":"15s","redis_write_timeout":"15s"},"name":"redis"}' ./dist/example_config.yaml > ./dist/example_redis_config.yaml
+          cat ./dist/example_redis_config.yaml
+      - name: "Run end-to-end tests"
+        run: |
+          chihaya --config=./dist/example_redis_config.yaml --debug &
+          pid=$!
+          sleep 2
+          chihaya e2e --debug
+          kill $pid
+  image-build:
+    name: "Docker Build"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "docker/setup-qemu-action@v1"
+      - uses: "docker/setup-buildx-action@v1"
+        with:
+          driver-opts: "image=moby/buildkit:master"
+      - uses: "docker/build-push-action@v1"
+        with:
+          push: false
+          tags: "latest"
+  helm:
+    name: "Helm Template"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - name: "Install Helm"
+        uses: "engineerd/configurator@v0.0.5"
+        with:
+          name: "helm"
+          pathInArchive: "linux-amd64/helm"
+          fromGitHubReleases: true
+          repo: "helm/helm"
+          version: "^v3"
+          urlTemplate: "https://get.helm.sh/helm-{{version}}-linux-amd64.tar.gz"
+          token: "${{ secrets.GITHUB_TOKEN }}"
+      - name: "Run `helm template`"
+        working-directory: "./dist/helm/chihaya"
+        run: "helm template . --debug"

.github/workflows/lint.yaml (new file)

@@ -0,0 +1,86 @@
+---
+name: "Lint"
+on:
+  push:
+    branches:
+      - "!dependabot/*"
+      - "main"
+  pull_request:
+    branches: ["*"]
+jobs:
+  go-mod-tidy:
+    name: "Lint Go Modules"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - name: "Run `go mod tidy`"
+        run: "go mod tidy && bash -c '[ $(git status --porcelain | tee /dev/fd/2 | wc -c) -eq 0 ]'"
+  go-fmt:
+    name: "Format Go"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - name: "Install gofumpt"
+        run: "go install mvdan.cc/gofumpt@latest"
+      - name: "Run `gofumpt`"
+        run: |
+          GOFUMPT_OUTPUT="$(find . -iname '*.go' -type f | xargs gofumpt -d)"
+          if [ -n "$GOFUMPT_OUTPUT" ]; then
+            echo "The following files are not correctly formatted:"
+            echo "${GOFUMPT_OUTPUT}"
+            exit 1
+          fi
+  go-lint:
+    name: "Lint Go"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "actions/setup-go@v2"
+        with:
+          go-version: "^1.17"
+      - uses: "golangci/golangci-lint-action@v2"
+        with:
+          version: "v1.43"
+          skip-go-installation: true
+          skip-pkg-cache: true
+          skip-build-cache: false
+  extra-lint:
+    name: "Lint YAML & Markdown"
+    runs-on: "ubuntu-latest"
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "bewuethr/yamllint-action@v1.1.1"
+        with:
+          config-file: ".yamllint"
+      - uses: "nosborn/github-action-markdown-cli@v2.0.0"
+        with:
+          files: "."
+          config_file: ".markdownlint.yaml"
+  codeql:
+    name: "Analyze with CodeQL"
+    runs-on: "ubuntu-latest"
+    permissions:
+      actions: "read"
+      contents: "read"
+      security-events: "write"
+    strategy:
+      fail-fast: false
+      matrix:
+        language: ["go"]
+    steps:
+      - uses: "actions/checkout@v2"
+      - uses: "github/codeql-action/init@v1"
+        with:
+          languages: "${{ matrix.language }}"
+      - uses: "github/codeql-action/autobuild@v1"
+      - uses: "github/codeql-action/analyze@v1"

.golangci.yaml (new file)

@@ -0,0 +1,50 @@
+---
+run:
+  timeout: "5m"
+output:
+  sort-results: true
+linters-settings:
+  goimports:
+    local-prefixes: "github.com/chihaya/chihaya"
+  gosec:
+    excludes:
+      - "G404"  # Allow the usage of math/rand
+linters:
+  enable:
+    - "bidichk"
+    - "bodyclose"
+    - "deadcode"
+    - "errcheck"
+    - "errname"
+    - "errorlint"
+    - "gofumpt"
+    - "goimports"
+    - "goprintffuncname"
+    - "gosec"
+    - "gosimple"
+    - "govet"
+    - "ifshort"
+    - "importas"
+    - "ineffassign"
+    - "makezero"
+    - "prealloc"
+    - "predeclared"
+    - "revive"
+    - "rowserrcheck"
+    - "staticcheck"
+    - "structcheck"
+    - "stylecheck"
+    - "tenv"
+    - "typecheck"
+    - "unconvert"
+    - "unused"
+    - "varcheck"
+    - "wastedassign"
+    - "whitespace"
+issues:
+  include:
+    - "EXC0012"  # Exported should have comment
+    - "EXC0012"  # Exported should have comment
+    - "EXC0013"  # Package comment should be of form
+    - "EXC0014"  # Comment on exported should be of form
+    - "EXC0015"  # Should have a package comment

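The `gosec` exclusion above (`G404`) permits `math/rand`; a tracker's randomness (for example, the "interval variation" middleware configured further down) is not security-sensitive, so `crypto/rand` would be overkill. A standalone sketch of the kind of code this exclusion allows — illustrative only, not taken from the repository:

```go
package main

import (
	"fmt"
	"math/rand"
	"time"
)

// jitter scales d by a random factor in [0.8, 1.2).
// math/rand is acceptable here because announce-interval jitter has
// no security impact; this is exactly the use excluding G404 allows.
func jitter(d time.Duration) time.Duration {
	return time.Duration(float64(d) * (0.8 + 0.4*rand.Float64()))
}

func main() {
	fmt.Println(jitter(30 * time.Minute))
}
```
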
.markdownlint.yaml (new file)

@@ -0,0 +1,3 @@
+---
+line-length: false
+no-hard-tabs: false

.yamllint (new file)

@@ -0,0 +1,11 @@
+# vim: ft=yaml
+---
+yaml-files:
+  - "*.yaml"
+  - "*.yml"
+  - ".yamllint"
+ignore: "dist/helm/"
+extends: "default"
+rules:
+  quoted-strings: "enable"
+  line-length: "disable"

CONTRIBUTING.md

@@ -1,3 +1,5 @@
 # How to Contribute
+
 ## Discussion
+
 Long-term discussion and bug reports are maintained via [GitHub Issues].
@@ -55,7 +57,7 @@ All files should have `gofmt` executed on them and code should strive to have fu
 We follow a rough convention for commit messages that is designed to answer two questions: what changed and why.
 The subject line should feature the what and the body of the commit should describe the why.
 
-```
+```git
 scripts: add the test-cluster command
 
 this uses tmux to setup a test cluster that you can easily kill and
@@ -66,7 +68,7 @@ Fixes #38
 The format can be described more formally as follows:
 
-```
+```git
 <subsystem>: <what changed>
 <BLANK LINE>
 <why this change was made>

README.md

@@ -1,7 +1,7 @@
 # Chihaya
 
-[![Build Status](https://github.com/chihaya/chihaya/workflows/CI/badge.svg)](https://github.com/chihaya/chihaya/actions)
-[![Docker Repository on Quay.io](https://quay.io/repository/jzelinskie/chihaya/status "Docker Repository on Quay.io")](https://quay.io/repository/jzelinskie/chihaya)
+[![Build Status](https://github.com/chihaya/chihaya/workflows/Build%20&%20Test/badge.svg)](https://github.com/chihaya/chihaya/actions)
+[![Container Image](https://img.shields.io/github/v/release/chihaya/chihaya?color=%232496ED&label=container&logo=docker "Container Image")](https://quay.io/repository/jzelinskie/chihaya?tab=tags)
 [![GoDoc](https://godoc.org/github.com/chihaya/chihaya?status.svg)](https://godoc.org/github.com/chihaya/chihaya)
 [![License](https://img.shields.io/badge/license-BSD-blue.svg)](https://en.wikipedia.org/wiki/BSD_licenses#2-clause_license_.28.22Simplified_BSD_License.22_or_.22FreeBSD_License.22.29)
 [![IRC Channel](https://img.shields.io/badge/IRC-%23chihaya-%235555ff.svg "IRC Channel")](https://web.libera.chat/#chihaya)
@@ -57,10 +57,10 @@ For more information read [CONTRIBUTING.md].
 In order to compile the project, the [latest stable version of Go] and knowledge of a [working Go environment] are required.
 
 ```sh
-$ git clone git@github.com:chihaya/chihaya.git
-$ cd chihaya
-$ go build ./cmd/chihaya
-$ ./chihaya --help
+git clone git@github.com:chihaya/chihaya.git
+cd chihaya
+go build ./cmd/chihaya
+./chihaya --help
 ```
 
 [latest stable version of Go]: https://golang.org/dl
@@ -79,15 +79,15 @@ The following will run all tests and benchmarks.
 Removing `-bench` will just run unit tests.
 
 ```sh
-$ go test -bench $(go list ./...)
+go test -bench $(go list ./...)
 ```
 
 The Chihaya executable contains a command to end-to-end test a BitTorrent tracker.
 See
 
 ```sh
-$ chihaya --help
+chihaya --help
 ```
 
 ### Configuration


@@ -5,7 +5,7 @@ import (
 )
 
 func TestClientID(t *testing.T) {
-    var clientTable = []struct{ peerID, clientID string }{
+    clientTable := []struct{ peerID, clientID string }{
         {"-AZ3034-6wfG2wk6wWLc", "AZ3034"},
         {"-AZ3042-6ozMq5q6Q3NX", "AZ3042"},
         {"-BS5820-oy4La2MWGEFj", "BS5820"},


@@ -8,7 +8,7 @@ import (
 )
 
 func TestNew(t *testing.T) {
-    var table = []struct {
+    table := []struct {
         data        string
         expected    Event
         expectedErr error


@@ -187,15 +187,15 @@ func (qp *QueryParams) String(key string) (string, bool) {
     return value, ok
 }
 
-// Uint64 returns a uint parsed from a query. After being called, it is safe to
+// Uint returns a uint parsed from a query. After being called, it is safe to
 // cast the uint64 to your desired length.
-func (qp *QueryParams) Uint64(key string) (uint64, error) {
+func (qp *QueryParams) Uint(key string, bitSize int) (uint64, error) {
     str, exists := qp.params[key]
     if !exists {
         return 0, ErrKeyNotFound
     }
 
-    val, err := strconv.ParseUint(str, 10, 64)
+    val, err := strconv.ParseUint(str, 10, bitSize)
     if err != nil {
         return 0, err
     }

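The new `bitSize` argument is forwarded to `strconv.ParseUint`, so out-of-range values now fail at parse time instead of being silently truncated by a later cast (the old code always parsed at 64 bits). A standalone sketch of the underlying behavior:

```go
package main

import (
	"fmt"
	"strconv"
)

func main() {
	// With bitSize 16, a value that cannot fit in a uint16 is an
	// error: ParseUint returns the maximum value and ErrRange.
	v, err := strconv.ParseUint("65536", 10, 16)
	fmt.Println(v, err) // 65535 strconv.ParseUint: parsing "65536": value out of range

	// In range, the later uint16 cast is guaranteed to be lossless.
	v, err = strconv.ParseUint("6881", 10, 16)
	fmt.Println(uint16(v), err) // 6881 <nil>
}
```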

@@ -92,7 +92,7 @@ func TestParseInvalidURLData(t *testing.T) {
 
 func TestParseShouldNotPanicURLData(t *testing.T) {
     for _, parseStr := range shouldNotPanicQueries {
-        ParseURLData(parseStr)
+        _, _ = ParseURLData(parseStr)
     }
 }


@@ -1,8 +1,8 @@
 package main
 
 import (
+    "context"
     "errors"
-    "os"
     "os/signal"
     "runtime"
     "strings"
@@ -100,7 +100,7 @@ func (r *Run) Start(ps storage.PeerStore) error {
 }
 
 func combineErrors(prefix string, errs []error) error {
-    var errStrs []string
+    errStrs := make([]string, 0, len(errs))
     for _, err := range errs {
         errStrs = append(errStrs, err.Error())
     }
@@ -144,15 +144,13 @@ func RootRunCmdFunc(cmd *cobra.Command, args []string) error {
         return err
     }
 
-    quit := make(chan os.Signal)
-    signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM)
-
-    reload := makeReloadChan()
+    ctx, _ := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
+    reload, _ := signal.NotifyContext(context.Background(), ReloadSignals...)
 
     for {
         select {
-        case <-reload:
-            log.Info("reloading; received SIGUSR1")
+        case <-reload.Done():
+            log.Info("reloading; received reload signal")
             peerStore, err := r.Stop(true)
             if err != nil {
                 return err
@@ -161,8 +159,8 @@ func RootRunCmdFunc(cmd *cobra.Command, args []string) error {
             if err := r.Start(peerStore); err != nil {
                 return err
             }
-        case <-quit:
-            log.Info("shutting down; received SIGINT/SIGTERM")
+        case <-ctx.Done():
+            log.Info("shutting down; received shutdown signal")
             if _, err := r.Stop(false); err != nil {
                 return err
             }
@@ -210,7 +208,7 @@ func RootPostRunCmdFunc(cmd *cobra.Command, args []string) error {
 }
 
 func main() {
-    var rootCmd = &cobra.Command{
+    rootCmd := &cobra.Command{
         Use:   "chihaya",
         Short: "BitTorrent Tracker",
         Long:  "A customizable, multi-protocol BitTorrent Tracker",
@@ -229,7 +227,7 @@ func main() {
     rootCmd.Flags().String("config", "/etc/chihaya.yaml", "location of configuration file")
 
-    var e2eCmd = &cobra.Command{
+    e2eCmd := &cobra.Command{
         Use:   "e2e",
         Short: "exec e2e tests",
         Long:  "Execute the Chihaya end-to-end test suite",

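`signal.NotifyContext` (Go 1.16+) replaces the hand-rolled channels, exposing shutdown and reload as contexts; the loop above deliberately discards the cancel functions because it runs for the life of the process. A minimal standalone sketch of the pattern:

```go
package main

import (
	"context"
	"fmt"
	"os/signal"
	"syscall"
)

func main() {
	// ctx.Done() is closed as soon as SIGINT or SIGTERM arrives.
	ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGINT, syscall.SIGTERM)
	defer stop() // unregisters the signal handlers

	<-ctx.Done()
	fmt.Println("shutting down; received shutdown signal")
}
```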

@@ -1,15 +1,15 @@
+//go:build darwin || freebsd || linux || netbsd || openbsd || dragonfly || solaris
 // +build darwin freebsd linux netbsd openbsd dragonfly solaris
 
 package main
 
 import (
     "os"
-    "os/signal"
     "syscall"
 )
 
-func makeReloadChan() <-chan os.Signal {
-    reload := make(chan os.Signal)
-    signal.Notify(reload, syscall.SIGUSR1)
-    return reload
-}
+// ReloadSignals are the signals that the current OS will send to the process
+// when a configuration reload is requested.
+var ReloadSignals = []os.Signal{
+    syscall.SIGUSR1,
+}


@@ -1,3 +1,4 @@
+//go:build windows
 // +build windows
 
 package main
@@ -8,8 +9,6 @@ import (
     "syscall"
 )
 
-func makeReloadChan() <-chan os.Signal {
-    reload := make(chan os.Signal)
-    signal.Notify(reload, syscall.SIGHUP)
-    return reload
-}
+var ReloadSignals = []os.Signal{
+    syscall.SIGHUP,
+}

dist/example_config.yaml

@@ -1,15 +1,16 @@
+---
 chihaya:
 
   # The interval communicated with BitTorrent clients informing them how
   # frequently they should announce in between client events.
-  announce_interval: 30m
+  announce_interval: "30m"
 
   # The interval communicated with BitTorrent clients informing them of the
   # minimal duration between announces.
-  min_announce_interval: 15m
+  min_announce_interval: "15m"
 
   # The network interface that will bind to an HTTP endpoint that can be
   # scraped by programs collecting metrics.
   #
   # /metrics serves metrics in the Prometheus format
   # /debug/pprof/{cmdline,profile,symbol,trace} serves profiles in the pprof format
   metrics_addr: "0.0.0.0:6880"
@@ -30,14 +31,14 @@ chihaya:
     tls_key_path: ""
 
     # The timeout durations for HTTP requests.
-    read_timeout: 5s
-    write_timeout: 5s
+    read_timeout: "5s"
+    write_timeout: "5s"
 
     # When true, persistent connections will be allowed. Generally this is not
     # useful for a public tracker, but helps performance in some cases (use of
     # a reverse proxy, or when there are few clients issuing many requests).
     enable_keepalive: false
-    idle_timeout: 30s
+    idle_timeout: "30s"
 
     # Whether to time requests.
     # Disabling this should increase performance/decrease load.
@@ -88,7 +89,7 @@ chihaya:
     addr: "0.0.0.0:6969"
 
     # The leeway for a timestamp on a connection ID.
-    max_clock_skew: 10s
+    max_clock_skew: "10s"
 
     # The key used to encrypt connection IDs.
     private_key: "paste a random string here that will be used to hmac connection IDs"
@@ -113,17 +114,17 @@ chihaya:
   # This block defines configuration used for the storage of peer data.
   storage:
-    name: memory
+    name: "memory"
     config:
       # The frequency which stale peers are removed.
       # This balances between
       #  - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
      #  - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
-      gc_interval: 3m
+      gc_interval: "3m"
 
       # The amount of time until a peer is considered stale.
       # To avoid churn, keep this slightly larger than `announce_interval`
-      peer_lifetime: 31m
+      peer_lifetime: "31m"
 
       # The number of partitions data will be divided into in order to provide a
       # higher degree of parallelism.
@@ -131,7 +132,7 @@ chihaya:
       # The interval at which metrics about the number of infohashes and peers
       # are collected and posted to Prometheus.
-      prometheus_reporting_interval: 1s
+      prometheus_reporting_interval: "1s"
 
   # This block defines configuration used for redis storage.
   # storage:
@@ -141,56 +142,56 @@ chihaya:
   #     # This balances between
   #     #  - collecting garbage more often, potentially using more CPU time, but potentially using less memory (lower value)
   #     #  - collecting garbage less frequently, saving CPU time, but keeping old peers long, thus using more memory (higher value).
-  #     gc_interval: 3m
+  #     gc_interval: "3m"
 
   #     # The interval at which metrics about the number of infohashes and peers
   #     # are collected and posted to Prometheus.
-  #     prometheus_reporting_interval: 1s
+  #     prometheus_reporting_interval: "1s"
 
   #     # The amount of time until a peer is considered stale.
   #     # To avoid churn, keep this slightly larger than `announce_interval`
-  #     peer_lifetime: 31m
+  #     peer_lifetime: "31m"
 
   #     # The address of redis storage.
   #     redis_broker: "redis://pwd@127.0.0.1:6379/0"
 
   #     # The timeout for reading a command reply from redis.
-  #     redis_read_timeout: 15s
+  #     redis_read_timeout: "15s"
 
   #     # The timeout for writing a command to redis.
-  #     redis_write_timeout: 15s
+  #     redis_write_timeout: "15s"
 
   #     # The timeout for connecting to redis server.
-  #     redis_connect_timeout: 15s
+  #     redis_connect_timeout: "15s"
 
   # This block defines configuration used for middleware executed before a
   # response has been returned to a BitTorrent client.
   prehooks:
-  #- name: jwt
+  # - name: "jwt"
   #   options:
   #     issuer: "https://issuer.com"
   #     audience: "https://chihaya.issuer.com"
   #     jwk_set_url: "https://issuer.com/keys"
-  #     jwk_set_update_interval: 5m
+  #     jwk_set_update_interval: "5m"
 
-  #- name: client approval
+  # - name: "client approval"
   #   options:
   #     whitelist:
   #       - "OP1011"
   #     blacklist:
   #       - "OP1012"
 
-  #- name: interval variation
+  # - name: "interval variation"
   #   options:
   #     modify_response_probability: 0.2
   #     max_increase_delta: 60
   #     modify_min_interval: true
 
   # This block defines configuration used for torrent approval; it requires hashes
   # to be given for a whitelist or a blacklist. Hashes are hexadecimal-encoded.
-  #- name: torrent approval
+  # - name: "torrent approval"
  #   options:
   #     whitelist:
   #       - "a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5a1b2c3d4e5"
   #     blacklist:
   #       - "e1d2c3b4a5e1b2c3b4a5e1d2c3b4e5e1d2c3b4a5"


@@ -1,11 +1,12 @@
+---
 global:
-  scrape_interval: 5s
-  evaluation_interval: 5s
+  scrape_interval: "5s"
+  evaluation_interval: "5s"
 
 # A scrape configuration containing exactly one endpoint to scrape:
 scrape_configs:
   # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
-  - job_name: 'local-chihaya' # you can name this however you want
-    scrape_interval: 5s # optionally override the global scrape_interval
+  - job_name: "local-chihaya" # you can name this however you want
+    scrape_interval: "5s" # optionally override the global scrape_interval
     static_configs:
-      - targets: ['localhost:6881'] # provide the address of chihaya's prometheus endpoint
+      - targets: ["localhost:6881"] # provide the address of chihaya's prometheus endpoint


@@ -1,6 +1,6 @@
 # Architecture
 
-### Overview
+## Overview
 
 BitTorrent clients send Announce and Scrape requests to a _Frontend_.
 Frontends parse requests and write responses for the particular protocol they implement.
@@ -11,6 +11,6 @@ After all PreHooks have executed, any missing response fields that are required
 PostHooks are asynchronous tasks that occur after a response has been delivered to the client.
 Because they are unnecessary for generating a response, updates to the Storage for a particular request are done asynchronously in a PostHook.
 
-### Diagram
+## Diagram
 
-![](https://user-images.githubusercontent.com/343539/52676700-05c45c80-2ef9-11e9-9887-8366008b4e7e.png)
+![architecture diagram](https://user-images.githubusercontent.com/343539/52676700-05c45c80-2ef9-11e9-9887-8366008b4e7e.png)


@@ -44,11 +44,11 @@ The typical control flow of handling announces, in more detail, is:
 6. Send the response to the Client.
 7. Pass the request and response to the `TrackerLogic`'s `AfterAnnounce` or `AfterScrape` method.
 8. Finish, accept next request.
 9. For invalid requests or errors during processing: Send an error response to the client.
    This step may be skipped for suspected denial-of-service attacks.
    The error response may contain information about the cause of the error.
    Only errors where the Client is at fault should be explained, internal server errors should be returned without explanation.
    Then finish, and accept the next request.
 
 #### Configuration
@@ -62,20 +62,22 @@ Frontends may provide runtime metrics, such as the number of requests or their d
 Metrics must be reported using [Prometheus].
 
 A frontend should provide at least the following metrics:
 
 - The number of valid and invalid requests handled
 - The average time it takes to handle a single request.
   This request timing should be made optional using a config entry.
 
 Requests should be separated by type, i.e. Scrapes, Announces, and other protocol-specific requests.
 If the frontend serves multiple transports or networks, metrics for them should be separable.
 
 It is recommended to publish one Prometheus `HistogramVec` with:
 
 - A name like `chihaya_PROTOCOL_response_duration_milliseconds`
 - A value holding the duration in milliseconds of the reported request
 - Labels for:
   - `action` (= `announce`, `scrape`, ...)
   - `address_family` (= `Unknown`, `IPv4`, `IPv6`, ...), if applicable
   - `error` (= A textual representation of the error encountered during processing.)
 
 Because `error` is expected to hold the textual representation of any error that occurred during the request, great care must be taken to ensure all error messages are static.
 `error` must not contain any information directly taken from the request, e.g. the value of an invalid parameter.
 This would cause this dimension of prometheus to explode, which slows down prometheus clients and reporters.
@@ -106,4 +108,4 @@ This way, a PreHook can communicate with a PostHook by setting a context value.
 [BEP 3]: http://bittorrent.org/beps/bep_0003.html
 [BEP 15]: http://bittorrent.org/beps/bep_0015.html
 [Prometheus]: https://prometheus.io/
 [old-opentracker-style]: https://web.archive.org/web/20170503181830/http://opentracker.blog.h3q.com/2007/12/28/the-ipv6-situation/


@@ -59,7 +59,7 @@ All the InfoHashes (swarms) are also stored in a redis hash, with IP family as t
 Here is an example:
 
-```
+```yaml
 - IPv4
   - IPv4_S_<infohash 1>: <modification time>
   - IPv4_L_<infohash 1>: <modification time>
@@ -73,15 +73,14 @@ Here is an example:
   - <peer 3 key>: <modification time>
 ```
 
 In this case, prometheus would record two swarms, three seeders, and one leecher.
 These three keys per address family are used to record the count of swarms, seeders, and leechers.
 
-```
+```yaml
 - IPv4_infohash_count: 2
 - IPv4_S_count: 3
 - IPv4_L_count: 1
 ```
 
-Note: IPv4_infohash_count has a different meaning compared to the `memory` storage:
+Note: `IPv4_infohash_count` has a different meaning compared to the `memory` storage:
 It represents the number of infohashes reported by seeder, meaning that infohashes without seeders are not counted.


@@ -46,8 +46,8 @@ func BenchmarkUnmarshalScalar(b *testing.B) {
     d2 := NewDecoder(&bufferLoop{"i42e"})
 
     for i := 0; i < b.N; i++ {
-        d1.Decode()
-        d2.Decode()
+        _, _ = d1.Decode()
+        _, _ = d2.Decode()
     }
 }
@@ -79,6 +79,6 @@ func BenchmarkUnmarshalLarge(b *testing.B) {
     dec := NewDecoder(&bufferLoop{string(buf)})
 
     for i := 0; i < b.N; i++ {
-        dec.Decode()
+        _, _ = dec.Decode()
     }
 }


@@ -66,7 +66,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
         err = marshalInt(w, int64(v))
 
     case int64:
-        err = marshalInt(w, int64(v))
+        err = marshalInt(w, v)
 
     case uint:
         err = marshalUint(w, uint64(v))
@@ -78,7 +78,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
         err = marshalUint(w, uint64(v))
 
     case uint64:
-        err = marshalUint(w, uint64(v))
+        err = marshalUint(w, v)
 
     case time.Duration: // Assume seconds
         err = marshalInt(w, int64(v/time.Second))
@@ -90,7 +90,7 @@ func marshal(w io.Writer, data interface{}) (err error) {
         err = marshalList(w, v)
 
     case []Dict:
-        var interfaceSlice = make([]interface{}, len(v))
+        interfaceSlice := make([]interface{}, len(v))
         for i, d := range v {
             interfaceSlice[i] = d
         }


@@ -50,8 +50,8 @@ func BenchmarkMarshalScalar(b *testing.B) {
     encoder := NewEncoder(buf)
 
     for i := 0; i < b.N; i++ {
-        encoder.Encode("test")
-        encoder.Encode(123)
+        _ = encoder.Encode("test")
+        _ = encoder.Encode(123)
     }
 }
@@ -67,6 +67,6 @@ func BenchmarkMarshalLarge(b *testing.B) {
     encoder := NewEncoder(buf)
 
     for i := 0; i < b.N; i++ {
-        encoder.Encode(data)
+        _ = encoder.Encode(data)
     }
 }


@@ -164,6 +164,7 @@ func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error
     if cfg.TLSCertPath != "" && cfg.TLSKeyPath != "" {
         var err error
         f.tlsCfg = &tls.Config{
+            MinVersion:   tls.VersionTLS12,
             Certificates: make([]tls.Certificate, 1),
         }
         f.tlsCfg.Certificates[0], err = tls.LoadX509KeyPair(cfg.TLSCertPath, cfg.TLSKeyPath)
@@ -265,7 +266,7 @@ func (f *Frontend) serveHTTP(l net.Listener) error {
     f.srv.SetKeepAlivesEnabled(f.EnableKeepAlive)
 
     // Start the HTTP server.
-    if err := f.srv.Serve(l); err != http.ErrServerClosed {
+    if err := f.srv.Serve(l); !errors.Is(err, http.ErrServerClosed) {
         return err
     }
     return nil
@@ -285,7 +286,7 @@ func (f *Frontend) serveHTTPS(l net.Listener) error {
     f.tlsSrv.SetKeepAlivesEnabled(f.EnableKeepAlive)
 
     // Start the HTTP server.
-    if err := f.tlsSrv.ServeTLS(l, "", ""); err != http.ErrServerClosed {
+    if err := f.tlsSrv.ServeTLS(l, "", ""); !errors.Is(err, http.ErrServerClosed) {
         return err
     }
     return nil
@@ -317,7 +318,7 @@ func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, ps http
     req, err := ParseAnnounce(r, f.ParseOptions)
     if err != nil {
-        WriteError(w, err)
+        _ = WriteError(w, err)
         return
     }
     af = new(bittorrent.AddressFamily)
@@ -326,14 +327,14 @@ func (f *Frontend) announceRoute(w http.ResponseWriter, r *http.Request, ps http
     ctx := injectRouteParamsToContext(context.Background(), ps)
     ctx, resp, err := f.logic.HandleAnnounce(ctx, req)
     if err != nil {
-        WriteError(w, err)
+        _ = WriteError(w, err)
         return
     }
 
     w.Header().Set("Content-Type", "text/plain; charset=utf-8")
     err = WriteAnnounceResponse(w, resp)
     if err != nil {
-        WriteError(w, err)
+        _ = WriteError(w, err)
         return
     }
@@ -358,14 +359,14 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
     req, err := ParseScrape(r, f.ParseOptions)
     if err != nil {
-        WriteError(w, err)
+        _ = WriteError(w, err)
         return
     }
 
     host, _, err := net.SplitHostPort(r.RemoteAddr)
     if err != nil {
         log.Error("http: unable to determine remote address for scrape", log.Err(err))
-        WriteError(w, err)
+        _ = WriteError(w, err)
         return
     }
@@ -376,7 +377,7 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
         req.AddressFamily = bittorrent.IPv6
     } else {
         log.Error("http: invalid IP: neither v4 nor v6", log.Fields{"RemoteAddr": r.RemoteAddr})
-        WriteError(w, bittorrent.ErrInvalidIP)
+        _ = WriteError(w, bittorrent.ErrInvalidIP)
         return
     }
     af = new(bittorrent.AddressFamily)
@@ -385,14 +386,14 @@ func (f *Frontend) scrapeRoute(w http.ResponseWriter, r *http.Request, ps httpro
     ctx := injectRouteParamsToContext(context.Background(), ps)
     ctx, resp, err := f.logic.HandleScrape(ctx, req)
     if err != nil {
-        WriteError(w, err)
+        _ = WriteError(w, err)
         return
     }
 
     w.Header().Set("Content-Type", "text/plain; charset=utf-8")
     err = WriteScrapeResponse(w, resp)
     if err != nil {
-        WriteError(w, err)
+        _ = WriteError(w, err)
         return
     }

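Replacing `!=` with `!errors.Is(err, http.ErrServerClosed)` matters once errors can arrive wrapped: `errors.Is` walks the wrap chain, a plain comparison does not. A minimal sketch:

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
)

func main() {
	// A wrapped sentinel no longer compares equal with !=,
	// but errors.Is finds it anywhere in the chain.
	err := fmt.Errorf("serve: %w", http.ErrServerClosed)
	fmt.Println(err == http.ErrServerClosed)          // false
	fmt.Println(errors.Is(err, http.ErrServerClosed)) // true
}
```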

@@ -1,6 +1,7 @@
 package http
 
 import (
+    "errors"
     "net"
     "net/http"
 
@@ -73,26 +74,26 @@ func ParseAnnounce(r *http.Request, opts ParseOptions) (*bittorrent.AnnounceRequ
     request.Peer.ID = bittorrent.PeerIDFromString(peerID)
 
     // Determine the number of remaining bytes for the client.
-    request.Left, err = qp.Uint64("left")
+    request.Left, err = qp.Uint("left", 64)
     if err != nil {
         return nil, bittorrent.ClientError("failed to parse parameter: left")
     }
 
     // Determine the number of bytes downloaded by the client.
-    request.Downloaded, err = qp.Uint64("downloaded")
+    request.Downloaded, err = qp.Uint("downloaded", 64)
     if err != nil {
         return nil, bittorrent.ClientError("failed to parse parameter: downloaded")
     }
 
     // Determine the number of bytes shared by the client.
-    request.Uploaded, err = qp.Uint64("uploaded")
+    request.Uploaded, err = qp.Uint("uploaded", 64)
     if err != nil {
         return nil, bittorrent.ClientError("failed to parse parameter: uploaded")
     }
 
     // Determine the number of peers the client wants in the response.
-    numwant, err := qp.Uint64("numwant")
-    if err != nil && err != bittorrent.ErrKeyNotFound {
+    numwant, err := qp.Uint("numwant", 32)
+    if err != nil && !errors.Is(err, bittorrent.ErrKeyNotFound) {
         return nil, bittorrent.ClientError("failed to parse parameter: numwant")
     }
 
     // If there were no errors, the user actually provided the numwant.
@@ -100,7 +101,7 @@ func ParseAnnounce(r *http.Request, opts ParseOptions) (*bittorrent.AnnounceRequ
     request.NumWant = uint32(numwant)
 
     // Parse the port where the client is listening.
-    port, err := qp.Uint64("port")
+    port, err := qp.Uint("port", 16)
     if err != nil {
         return nil, bittorrent.ClientError("failed to parse parameter: port")
     }


@@ -1,6 +1,7 @@
 package http
 
 import (
+    "errors"
     "time"
 
     "github.com/prometheus/client_golang/prometheus"
@@ -26,8 +27,9 @@ var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
 func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
     var errString string
     if err != nil {
-        if _, ok := err.(bittorrent.ClientError); ok {
-            errString = err.Error()
+        var clientErr bittorrent.ClientError
+        if errors.As(err, &clientErr) {
+            errString = clientErr.Error()
         } else {
             errString = "internal error"
         }

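`errors.As` replaces the one-shot type assertion so that a `ClientError` is still recognized after wrapping. A standalone sketch of the pattern; the string-based `ClientError` below mirrors how the type is used here, but it is this sketch's own definition, not the package's:

```go
package main

import (
	"errors"
	"fmt"
)

// ClientError is assumed to be a string-based error type for this
// sketch; the real definition lives in the bittorrent package.
type ClientError string

func (e ClientError) Error() string { return string(e) }

func main() {
	err := fmt.Errorf("announce: %w", ClientError("bad numwant"))

	var clientErr ClientError
	if errors.As(err, &clientErr) {
		fmt.Println(clientErr.Error()) // bad numwant
	} else {
		fmt.Println("internal error")
	}
}
```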

@@ -1,6 +1,7 @@
 package http
 
 import (
+    "errors"
     "net/http"
 
     "github.com/chihaya/chihaya/bittorrent"
@@ -11,8 +12,9 @@ import (
 // WriteError communicates an error to a BitTorrent client over HTTP.
 func WriteError(w http.ResponseWriter, err error) error {
     message := "internal server error"
-    if _, clientErr := err.(bittorrent.ClientError); clientErr {
-        message = err.Error()
+    var clientErr bittorrent.ClientError
+    if errors.As(err, &clientErr) {
+        message = clientErr.Error()
     } else {
         log.Error("http: internal error", log.Err(err))
     }
@@ -57,7 +59,7 @@ func WriteAnnounceResponse(w http.ResponseWriter, resp *bittorrent.AnnounceRespo
     }
 
     // Add the peers to the dictionary.
-    var peers []bencode.Dict
+    peers := make([]bencode.Dict, 0, len(resp.IPv4Peers)+len(resp.IPv6Peers))
     for _, peer := range resp.IPv4Peers {
         peers = append(peers, dict(peer))
     }


@@ -11,7 +11,7 @@ import (
 )
 
 func TestWriteError(t *testing.T) {
-    var table = []struct {
+    table := []struct {
         reason, expected string
     }{
         {"hello world", "d14:failure reason11:hello worlde"},
@@ -29,7 +29,7 @@ func TestWriteError(t *testing.T) {
 }
 
 func TestWriteStatus(t *testing.T) {
-    var table = []struct {
+    table := []struct {
         reason, expected string
     }{
         {"something is missing", "d14:failure reason20:something is missinge"},


@@ -11,24 +11,27 @@ type BytePool struct {
 func New(length int) *BytePool {
     var bp BytePool
     bp.Pool.New = func() interface{} {
-        return make([]byte, length, length)
+        b := make([]byte, length)
+        return &b
     }
     return &bp
 }
 
 // Get returns a byte slice from the pool.
-func (bp *BytePool) Get() []byte {
-    return bp.Pool.Get().([]byte)
+func (bp *BytePool) Get() *[]byte {
+    return bp.Pool.Get().(*[]byte)
 }
 
 // Put returns a byte slice to the pool.
-func (bp *BytePool) Put(b []byte) {
-    b = b[:cap(b)]
+func (bp *BytePool) Put(b *[]byte) {
+    *b = (*b)[:cap(*b)]
     // Zero out the bytes.
-    // Apparently this specific expression is optimized by the compiler, see
-    // github.com/golang/go/issues/5373.
-    for i := range b {
-        b[i] = 0
+    // This specific expression is optimized by the compiler:
+    // https://github.com/golang/go/issues/5373.
+    for i := range *b {
+        (*b)[i] = 0
     }
     bp.Pool.Put(b)
 }

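Storing `*[]byte` instead of `[]byte` is the fix for staticcheck's SA6002: placing a slice header into a `sync.Pool` as an `interface{}` allocates on every `Put`, while a pointer does not. A self-contained sketch of the resulting shape (this sketch redefines the pool locally for illustration):

```go
package main

import (
	"fmt"
	"sync"
)

// BytePool mirrors the pool above: sync.Pool holds *[]byte so that
// Get and Put move a single pointer instead of a slice header.
type BytePool struct{ sync.Pool }

func New(length int) *BytePool {
	var bp BytePool
	bp.Pool.New = func() interface{} {
		b := make([]byte, length)
		return &b
	}
	return &bp
}

func main() {
	pool := New(2048)

	buf := pool.Get().(*[]byte) // callers now handle *[]byte
	n := copy(*buf, "incoming packet")
	fmt.Println(string((*buf)[:n]))

	pool.Put(buf) // the real Put also zeroes the backing array
}
```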

@@ -6,6 +6,7 @@ import (
     "bytes"
     "context"
     "encoding/binary"
+    "errors"
     "fmt"
     "math/rand"
     "net"
@@ -123,8 +124,7 @@ func NewFrontend(logic frontend.TrackerLogic, provided Config) (*Frontend, error
         },
     }
 
-    err := f.listen()
-    if err != nil {
+    if err := f.listen(); err != nil {
         return nil, err
     }
@@ -148,7 +148,7 @@ func (t *Frontend) Stop() stop.Result {
     c := make(stop.Channel)
     go func() {
         close(t.closing)
-        t.socket.SetReadDeadline(time.Now())
+        _ = t.socket.SetReadDeadline(time.Now())
         t.wg.Wait()
         c.Done(t.socket.Close())
     }()
@@ -185,10 +185,11 @@ func (t *Frontend) serve() error {
         // Read a UDP packet into a reusable buffer.
         buffer := pool.Get()
-        n, addr, err := t.socket.ReadFromUDP(buffer)
+        n, addr, err := t.socket.ReadFromUDP(*buffer)
         if err != nil {
             pool.Put(buffer)
-            if netErr, ok := err.(net.Error); ok && netErr.Temporary() {
+            var netErr net.Error
+            if errors.As(err, &netErr) && netErr.Temporary() {
                 // A temporary failure is not fatal; just pretend it never happened.
                 continue
             }
@@ -217,7 +218,7 @@ func (t *Frontend) serve() error {
         }
 
         action, af, err := t.handleRequest(
             // Make sure the IP is copied, not referenced.
-            Request{buffer[:n], append([]byte{}, addr.IP...)},
+            Request{(*buffer)[:n], append([]byte{}, addr.IP...)},
             ResponseWriter{t.socket, addr},
         )
         if t.EnableRequestTiming {
@@ -244,7 +245,7 @@ type ResponseWriter struct {
 // Write implements the io.Writer interface for a ResponseWriter.
 func (w ResponseWriter) Write(b []byte) (int, error) {
-    w.socket.WriteToUDP(b, w.addr)
+    _, _ = w.socket.WriteToUDP(b, w.addr)
     return len(b), nil
 }


@@ -24,8 +24,8 @@
 // Option-Types as described in BEP 41 and BEP 45.
 const (
     optionEndOfOptions byte = 0x0
-    optionNOP               = 0x1
-    optionURLData           = 0x2
+    optionNOP          byte = 0x1
+    optionURLData      byte = 0x2
 )
 
 var (
@@ -115,7 +115,7 @@ func ParseAnnounce(r Request, v6Action bool, opts ParseOptions) (*bittorrent.Ann
     request := &bittorrent.AnnounceRequest{
         Event:      eventIDs[eventID],
         InfoHash:   bittorrent.InfoHashFromBytes(infohash),
-        NumWant:    uint32(numWant),
+        NumWant:    numWant,
         Left:       left,
         Downloaded: downloaded,
         Uploaded:   uploaded,
@@ -161,7 +161,7 @@ func handleOptionalParameters(packet []byte) (bittorrent.Params, error) {
         return bittorrent.ParseURLData("")
     }
 
-    var buf = newBuffer()
+    buf := newBuffer()
     defer buf.free()
 
     for i := 0; i < len(packet); {


@@ -1,6 +1,7 @@
 package udp
 
 import (
+    "errors"
     "fmt"
     "testing"
 )
@@ -51,7 +52,7 @@ func TestHandleOptionalParameters(t *testing.T) {
     for _, tt := range table {
         t.Run(fmt.Sprintf("%#v as %#v", tt.data, tt.values), func(t *testing.T) {
             params, err := handleOptionalParameters(tt.data)
-            if err != tt.err {
+            if !errors.Is(err, tt.err) {
                 if tt.err == nil {
                     t.Fatalf("expected no parsing error for %x but got %s", tt.data, err)
                 } else {


@@ -1,6 +1,7 @@
 package udp
 
 import (
+    "errors"
     "time"
 
     "github.com/prometheus/client_golang/prometheus"
@@ -26,8 +27,9 @@ var promResponseDurationMilliseconds = prometheus.NewHistogramVec(
 func recordResponseDuration(action string, af *bittorrent.AddressFamily, err error, duration time.Duration) {
     var errString string
     if err != nil {
-        if _, ok := err.(bittorrent.ClientError); ok {
-            errString = err.Error()
+        var clientErr bittorrent.ClientError
+        if errors.As(err, &clientErr) {
+            errString = clientErr.Error()
         } else {
             errString = "internal error"
         }


@@ -2,6 +2,7 @@ package udp
 
 import (
     "encoding/binary"
+    "errors"
     "fmt"
     "io"
     "time"
@@ -12,15 +13,16 @@ import (
 // WriteError writes the failure reason as a null-terminated string.
 func WriteError(w io.Writer, txID []byte, err error) {
     // If the client wasn't at fault, acknowledge it.
-    if _, ok := err.(bittorrent.ClientError); !ok {
-        err = fmt.Errorf("internal error occurred: %s", err.Error())
+    var clientErr bittorrent.ClientError
+    if !errors.As(err, &clientErr) {
+        err = fmt.Errorf("internal error occurred: %w", err)
     }
 
     buf := newBuffer()
     writeHeader(buf, txID, errorActionID)
     buf.WriteString(err.Error())
     buf.WriteRune('\000')
-    w.Write(buf.Bytes())
+    _, _ = w.Write(buf.Bytes())
     buf.free()
 }
@@ -37,9 +39,9 @@ func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse,
     } else {
         writeHeader(buf, txID, announceActionID)
     }
-    binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
-    binary.Write(buf, binary.BigEndian, resp.Incomplete)
-    binary.Write(buf, binary.BigEndian, resp.Complete)
+    _ = binary.Write(buf, binary.BigEndian, uint32(resp.Interval/time.Second))
+    _ = binary.Write(buf, binary.BigEndian, resp.Incomplete)
+    _ = binary.Write(buf, binary.BigEndian, resp.Complete)
 
     peers := resp.IPv4Peers
     if v6Peers {
@@ -48,10 +50,10 @@ func WriteAnnounce(w io.Writer, txID []byte, resp *bittorrent.AnnounceResponse,
 
     for _, peer := range peers {
         buf.Write(peer.IP.IP)
-        binary.Write(buf, binary.BigEndian, peer.Port)
+        _ = binary.Write(buf, binary.BigEndian, peer.Port)
     }
 
-    w.Write(buf.Bytes())
+    _, _ = w.Write(buf.Bytes())
     buf.free()
 }
@@ -62,12 +64,12 @@ func WriteScrape(w io.Writer, txID []byte, resp *bittorrent.ScrapeResponse) {
     writeHeader(buf, txID, scrapeActionID)
 
     for _, scrape := range resp.Files {
-        binary.Write(buf, binary.BigEndian, scrape.Complete)
-        binary.Write(buf, binary.BigEndian, scrape.Snatches)
-        binary.Write(buf, binary.BigEndian, scrape.Incomplete)
+        _ = binary.Write(buf, binary.BigEndian, scrape.Complete)
+        _ = binary.Write(buf, binary.BigEndian, scrape.Snatches)
+        _ = binary.Write(buf, binary.BigEndian, scrape.Incomplete)
     }
 
-    w.Write(buf.Bytes())
+    _, _ = w.Write(buf.Bytes())
     buf.free()
 }
@@ -78,13 +80,13 @@ func WriteConnectionID(w io.Writer, txID, connID []byte) {
     writeHeader(buf, txID, connectActionID)
     buf.Write(connID)
 
-    w.Write(buf.Bytes())
+    _, _ = w.Write(buf.Bytes())
     buf.free()
 }
 
 // writeHeader writes the action and transaction ID to the provided response
 // buffer.
 func writeHeader(w io.Writer, txID []byte, action uint32) {
-    binary.Write(w, binary.BigEndian, action)
-    w.Write(txID)
+    _ = binary.Write(w, binary.BigEndian, action)
+    _, _ = w.Write(txID)
 }

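The blanked returns here exist to satisfy errcheck rather than to ignore real failures: `binary.Write` only fails if the underlying `io.Writer` does, and writes into an in-memory buffer cannot fail (assuming the tracker's `newBuffer` wraps a `bytes.Buffer`-style type, as the `WriteString`/`WriteRune`/`Bytes` calls suggest). A small sketch:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	// binary.Write returns an error only when the io.Writer does,
	// and bytes.Buffer's Write never returns one, so the result is
	// discarded purely to keep errcheck quiet.
	_ = binary.Write(&buf, binary.BigEndian, uint32(1800))

	fmt.Printf("% x\n", buf.Bytes()) // 00 00 07 08
}
```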

@@ -28,7 +28,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
     var cfg Config
     err := yaml.Unmarshal(optionBytes, &cfg)
     if err != nil {
-        return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err)
+        return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
     }
 
     return NewHook(cfg)


@ -2,6 +2,7 @@ package middleware
import ( import (
"context" "context"
"errors"
"github.com/chihaya/chihaya/bittorrent" "github.com/chihaya/chihaya/bittorrent"
"github.com/chihaya/chihaya/storage" "github.com/chihaya/chihaya/storage"
@ -37,12 +38,12 @@ func (h *swarmInteractionHook) HandleAnnounce(ctx context.Context, req *bittorre
switch { switch {
case req.Event == bittorrent.Stopped: case req.Event == bittorrent.Stopped:
err = h.store.DeleteSeeder(req.InfoHash, req.Peer) err = h.store.DeleteSeeder(req.InfoHash, req.Peer)
if err != nil && err != storage.ErrResourceDoesNotExist { if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
return ctx, err return ctx, err
} }
err = h.store.DeleteLeecher(req.InfoHash, req.Peer) err = h.store.DeleteLeecher(req.InfoHash, req.Peer)
if err != nil && err != storage.ErrResourceDoesNotExist { if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
return ctx, err return ctx, err
} }
case req.Event == bittorrent.Completed: case req.Event == bittorrent.Completed:
@@ -106,7 +107,7 @@ func (h *responseHook) HandleAnnounce(ctx context.Context, req *bittorrent.Annou
func (h *responseHook) appendPeers(req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error { func (h *responseHook) appendPeers(req *bittorrent.AnnounceRequest, resp *bittorrent.AnnounceResponse) error {
seeding := req.Left == 0 seeding := req.Left == 0
peers, err := h.store.AnnouncePeers(req.InfoHash, seeding, int(req.NumWant), req.Peer) peers, err := h.store.AnnouncePeers(req.InfoHash, seeding, int(req.NumWant), req.Peer)
if err != nil && err != storage.ErrResourceDoesNotExist { if err != nil && !errors.Is(err, storage.ErrResourceDoesNotExist) {
return err return err
} }
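
Note: the move from `err != storage.ErrResourceDoesNotExist` to `!errors.Is(...)` matters once errors may arrive wrapped: == matches only the exact value, while errors.Is walks the wrap chain. Demonstrated with a stand-in sentinel:

package main

import (
	"errors"
	"fmt"
)

var ErrResourceDoesNotExist = errors.New("resource does not exist") // stand-in

func main() {
	wrapped := fmt.Errorf("delete seeder: %w", ErrResourceDoesNotExist)

	fmt.Println(wrapped == ErrResourceDoesNotExist)          // false: different value
	fmt.Println(errors.Is(wrapped, ErrResourceDoesNotExist)) // true: matched through the wrap
}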

View file

@@ -44,7 +44,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
var cfg Config var cfg Config
err := yaml.Unmarshal(optionBytes, &cfg) err := yaml.Unmarshal(optionBytes, &cfg)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err) return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
} }
return NewHook(cfg) return NewHook(cfg)
@@ -93,8 +93,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
} }
log.Debug("performing initial fetch of JWKs") log.Debug("performing initial fetch of JWKs")
err := h.updateKeys() if err := h.updateKeys(); err != nil {
if err != nil {
return nil, errors.New("failed to fetch initial JWK Set: " + err.Error()) return nil, errors.New("failed to fetch initial JWK Set: " + err.Error())
} }
@@ -105,7 +104,7 @@ func NewHook(cfg Config) (middleware.Hook, error) {
return return
case <-time.After(cfg.JWKUpdateInterval): case <-time.After(cfg.JWKUpdateInterval):
log.Debug("performing fetch of JWKs") log.Debug("performing fetch of JWKs")
h.updateKeys() _ = h.updateKeys()
} }
} }
}() }()
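
Note: two idioms appear in this hunk: scoping err to the if statement so it cannot leak into the surrounding function, and explicitly discarding the periodic refresh error, since a failed refresh simply leaves the previous key set in place. A rough sketch of that loop shape, with placeholder names and a shortened interval:

package main

import (
	"fmt"
	"time"
)

func updateKeys() error { return nil } // stand-in for the JWK fetch

func main() {
	// Fail hard on the initial fetch; err is scoped to the if.
	if err := updateKeys(); err != nil {
		panic("failed to fetch initial JWK Set: " + err.Error())
	}

	done := make(chan struct{})
	go func() {
		for {
			select {
			case <-done:
				return
			case <-time.After(10 * time.Millisecond): // stands in for cfg.JWKUpdateInterval
				// Refresh failures are tolerable here, so the
				// error is dropped on purpose.
				_ = updateKeys()
			}
		}
	}()

	time.Sleep(50 * time.Millisecond)
	close(done)
	fmt.Println("done")
}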

View file

@@ -11,7 +11,7 @@ import (
// //
// Calling DeriveEntropyFromRequest multiple times yields the same values. // Calling DeriveEntropyFromRequest multiple times yields the same values.
func DeriveEntropyFromRequest(req *bittorrent.AnnounceRequest) (uint64, uint64) { func DeriveEntropyFromRequest(req *bittorrent.AnnounceRequest) (uint64, uint64) {
v0 := binary.BigEndian.Uint64([]byte(req.InfoHash[:8])) + binary.BigEndian.Uint64([]byte(req.InfoHash[8:16])) v0 := binary.BigEndian.Uint64(req.InfoHash[:8]) + binary.BigEndian.Uint64(req.InfoHash[8:16])
v1 := binary.BigEndian.Uint64([]byte(req.Peer.ID[:8])) + binary.BigEndian.Uint64([]byte(req.Peer.ID[8:16])) v1 := binary.BigEndian.Uint64(req.Peer.ID[:8]) + binary.BigEndian.Uint64(req.Peer.ID[8:16])
return v0, v1 return v0, v1
} }
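
Note: the removed []byte(...) conversions were redundant: slicing the fixed-size InfoHash and peer ID arrays already yields []byte values that binary.BigEndian.Uint64 accepts. The same computation in isolation, with a plain [20]byte standing in for the bittorrent types:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	var infoHash [20]byte // stand-in for bittorrent.InfoHash
	copy(infoHash[:], "abcdefghijklmnopqrst")

	// infoHash[:8] is already a []byte; no conversion needed.
	v0 := binary.BigEndian.Uint64(infoHash[:8]) + binary.BigEndian.Uint64(infoHash[8:16])
	fmt.Println(v0)
}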

View file

@@ -28,7 +28,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
var cfg Config var cfg Config
err := yaml.Unmarshal(optionBytes, &cfg) err := yaml.Unmarshal(optionBytes, &cfg)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err) return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
} }
return NewHook(cfg) return NewHook(cfg)

View file

@@ -29,7 +29,7 @@ func (d driver) NewHook(optionBytes []byte) (middleware.Hook, error) {
var cfg Config var cfg Config
err := yaml.Unmarshal(optionBytes, &cfg) err := yaml.Unmarshal(optionBytes, &cfg)
if err != nil { if err != nil {
return nil, fmt.Errorf("invalid options for middleware %s: %s", Name, err) return nil, fmt.Errorf("invalid options for middleware %s: %w", Name, err)
} }
return NewHook(cfg) return NewHook(cfg)
@@ -77,8 +77,7 @@ type hook struct {
// NewHook creates a middleware to randomly modify the announce interval from // NewHook creates a middleware to randomly modify the announce interval from
// the given config. // the given config.
func NewHook(cfg Config) (middleware.Hook, error) { func NewHook(cfg Config) (middleware.Hook, error) {
err := checkConfig(cfg) if err := checkConfig(cfg); err != nil {
if err != nil {
return nil, err return nil, err
} }
@@ -96,12 +95,12 @@ func (h *hook) HandleAnnounce(ctx context.Context, req *bittorrent.AnnounceReque
if h.cfg.ModifyResponseProbability == 1 || p < h.cfg.ModifyResponseProbability { if h.cfg.ModifyResponseProbability == 1 || p < h.cfg.ModifyResponseProbability {
// Generate the increase delta. // Generate the increase delta.
v, _, _ = random.Intn(s0, s1, h.cfg.MaxIncreaseDelta) v, _, _ = random.Intn(s0, s1, h.cfg.MaxIncreaseDelta)
addSeconds := time.Duration(v+1) * time.Second deltaDuration := time.Duration(v+1) * time.Second
resp.Interval += addSeconds resp.Interval += deltaDuration
if h.cfg.ModifyMinInterval { if h.cfg.ModifyMinInterval {
resp.MinInterval += addSeconds resp.MinInterval += deltaDuration
} }
return ctx, nil return ctx, nil
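
Note: the rename from addSeconds to deltaDuration is purely for accuracy: the value is a time.Duration, not a count of seconds. The arithmetic is unchanged; in isolation it looks like this (values invented):

package main

import (
	"fmt"
	"time"
)

func main() {
	v := int64(41) // e.g. a random delta below the configured maximum

	interval := 30 * time.Minute
	deltaDuration := time.Duration(v+1) * time.Second // integer delta -> duration
	interval += deltaDuration
	fmt.Println(interval) // 30m42s
}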

View file

@@ -4,6 +4,7 @@ package metrics
import ( import (
"context" "context"
"errors"
"net/http" "net/http"
"net/http/pprof" "net/http/pprof"
@@ -49,7 +50,7 @@ func NewServer(addr string) *Server {
} }
go func() { go func() {
if err := s.srv.ListenAndServe(); err != http.ErrServerClosed { if err := s.srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
log.Fatal("failed while serving prometheus", log.Err(err)) log.Fatal("failed while serving prometheus", log.Err(err))
} }
}() }()
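
Note: ListenAndServe returns http.ErrServerClosed after a clean shutdown, so that value has to be filtered out before treating the return as fatal; errors.Is keeps the check robust if the error ever arrives wrapped. The pattern in miniature:

package main

import (
	"errors"
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{Addr: "127.0.0.1:0"}
	go func() {
		// ErrServerClosed is the expected result of Close/Shutdown,
		// not a failure.
		if err := srv.ListenAndServe(); !errors.Is(err, http.ErrServerClosed) {
			log.Fatal("failed while serving: ", err)
		}
	}()
	time.Sleep(50 * time.Millisecond)
	_ = srv.Close()
	time.Sleep(50 * time.Millisecond) // let the goroutine observe the error
}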

View file

@@ -4,6 +4,7 @@ package memory
import ( import (
"encoding/binary" "encoding/binary"
"math"
"net" "net"
"runtime" "runtime"
"sync" "sync"
@@ -79,7 +80,7 @@ func (cfg Config) LogFields() log.Fields {
func (cfg Config) Validate() Config { func (cfg Config) Validate() Config {
validcfg := cfg validcfg := cfg
if cfg.ShardCount <= 0 { if cfg.ShardCount <= 0 || cfg.ShardCount > (math.MaxInt/2) {
validcfg.ShardCount = defaultShardCount validcfg.ShardCount = defaultShardCount
log.Warn("falling back to default configuration", log.Fields{ log.Warn("falling back to default configuration", log.Fields{
"name": Name + ".ShardCount", "name": Name + ".ShardCount",
@@ -142,7 +143,7 @@ func New(provided Config) (storage.PeerStore, error) {
case <-time.After(cfg.GarbageCollectionInterval): case <-time.After(cfg.GarbageCollectionInterval):
before := time.Now().Add(-cfg.PeerLifetime) before := time.Now().Add(-cfg.PeerLifetime)
log.Debug("storage: purging peers with no announces since", log.Fields{"before": before}) log.Debug("storage: purging peers with no announces since", log.Fields{"before": before})
ps.collectGarbage(before) _ = ps.collectGarbage(before)
} }
} }
}() }()
@@ -183,7 +184,8 @@ func decodePeerKey(pk serializedPeer) bittorrent.Peer {
peer := bittorrent.Peer{ peer := bittorrent.Peer{
ID: bittorrent.PeerIDFromString(string(pk[:20])), ID: bittorrent.PeerIDFromString(string(pk[:20])),
Port: binary.BigEndian.Uint16([]byte(pk[20:22])), Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
IP: bittorrent.IP{IP: net.IP(pk[22:])}} IP: bittorrent.IP{IP: net.IP(pk[22:])},
}
if ip := peer.IP.To4(); ip != nil { if ip := peer.IP.To4(); ip != nil {
peer.IP.IP = ip peer.IP.IP = ip
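
Note: decodePeerKey unpacks the serialized peer layout: 20 bytes of peer ID, a 2-byte big-endian port, then the raw IP bytes; the reshuffled closing brace is just gofumpt's struct-literal style. A standalone round trip under that layout (types simplified):

package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

// encodePeerKey packs ID (20 bytes) + port (2 bytes, big endian) + IP,
// mirroring the layout decodePeerKey reads back.
func encodePeerKey(id [20]byte, port uint16, ip net.IP) string {
	b := make([]byte, 22, 22+len(ip))
	copy(b[:20], id[:])
	binary.BigEndian.PutUint16(b[20:22], port)
	return string(append(b, ip...))
}

func main() {
	var id [20]byte
	copy(id[:], "-CH0001-123456789012")
	pk := encodePeerKey(id, 6881, net.ParseIP("203.0.113.7").To4())

	port := binary.BigEndian.Uint16([]byte(pk[20:22]))
	ip := net.IP(pk[22:])
	fmt.Println(port, ip) // 6881 203.0.113.7
}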

View file

@@ -25,6 +25,7 @@ package redis
import ( import (
"encoding/binary" "encoding/binary"
"errors"
"net" "net"
"strconv" "strconv"
"sync" "sync"
@@ -245,7 +246,8 @@ func decodePeerKey(pk serializedPeer) bittorrent.Peer {
peer := bittorrent.Peer{ peer := bittorrent.Peer{
ID: bittorrent.PeerIDFromString(string(pk[:20])), ID: bittorrent.PeerIDFromString(string(pk[:20])),
Port: binary.BigEndian.Uint16([]byte(pk[20:22])), Port: binary.BigEndian.Uint16([]byte(pk[20:22])),
IP: bittorrent.IP{IP: net.IP(pk[22:])}} IP: bittorrent.IP{IP: net.IP(pk[22:])},
}
if ip := peer.IP.To4(); ip != nil { if ip := peer.IP.To4(); ip != nil {
peer.IP.IP = ip peer.IP.IP = ip
@@ -300,7 +302,7 @@ func (ps *peerStore) populateProm() {
defer conn.Close() defer conn.Close()
for _, group := range ps.groups() { for _, group := range ps.groups() {
if n, err := redis.Int64(conn.Do("GET", ps.infohashCountKey(group))); err != nil && err != redis.ErrNil { if n, err := redis.Int64(conn.Do("GET", ps.infohashCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
log.Error("storage: GET counter failure", log.Fields{ log.Error("storage: GET counter failure", log.Fields{
"key": ps.infohashCountKey(group), "key": ps.infohashCountKey(group),
"error": err, "error": err,
@@ -308,7 +310,7 @@ func (ps *peerStore) populateProm() {
} else { } else {
numInfohashes += n numInfohashes += n
} }
if n, err := redis.Int64(conn.Do("GET", ps.seederCountKey(group))); err != nil && err != redis.ErrNil { if n, err := redis.Int64(conn.Do("GET", ps.seederCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
log.Error("storage: GET counter failure", log.Fields{ log.Error("storage: GET counter failure", log.Fields{
"key": ps.seederCountKey(group), "key": ps.seederCountKey(group),
"error": err, "error": err,
@@ -316,7 +318,7 @@ func (ps *peerStore) populateProm() {
} else { } else {
numSeeders += n numSeeders += n
} }
if n, err := redis.Int64(conn.Do("GET", ps.leecherCountKey(group))); err != nil && err != redis.ErrNil { if n, err := redis.Int64(conn.Do("GET", ps.leecherCountKey(group))); err != nil && !errors.Is(err, redis.ErrNil) {
log.Error("storage: GET counter failure", log.Fields{ log.Error("storage: GET counter failure", log.Fields{
"key": ps.leecherCountKey(group), "key": ps.leecherCountKey(group),
"error": err, "error": err,
@@ -356,9 +358,9 @@ func (ps *peerStore) PutSeeder(ih bittorrent.InfoHash, p bittorrent.Peer) error
conn := ps.rb.open() conn := ps.rb.open()
defer conn.Close() defer conn.Close()
conn.Send("MULTI") _ = conn.Send("MULTI")
conn.Send("HSET", encodedSeederInfoHash, pk, ct) _ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct) _ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
reply, err := redis.Int64s(conn.Do("EXEC")) reply, err := redis.Int64s(conn.Do("EXEC"))
if err != nil { if err != nil {
return err return err
@@ -437,9 +439,9 @@ func (ps *peerStore) PutLeecher(ih bittorrent.InfoHash, p bittorrent.Peer) error
conn := ps.rb.open() conn := ps.rb.open()
defer conn.Close() defer conn.Close()
conn.Send("MULTI") _ = conn.Send("MULTI")
conn.Send("HSET", encodedLeecherInfoHash, pk, ct) _ = conn.Send("HSET", encodedLeecherInfoHash, pk, ct)
conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct) _ = conn.Send("HSET", addressFamily, encodedLeecherInfoHash, ct)
reply, err := redis.Int64s(conn.Do("EXEC")) reply, err := redis.Int64s(conn.Do("EXEC"))
if err != nil { if err != nil {
return err return err
@@ -509,10 +511,10 @@ func (ps *peerStore) GraduateLeecher(ih bittorrent.InfoHash, p bittorrent.Peer)
conn := ps.rb.open() conn := ps.rb.open()
defer conn.Close() defer conn.Close()
conn.Send("MULTI") _ = conn.Send("MULTI")
conn.Send("HDEL", encodedLeecherInfoHash, pk) _ = conn.Send("HDEL", encodedLeecherInfoHash, pk)
conn.Send("HSET", encodedSeederInfoHash, pk, ct) _ = conn.Send("HSET", encodedSeederInfoHash, pk, ct)
conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct) _ = conn.Send("HSET", addressFamily, encodedSeederInfoHash, ct)
reply, err := redis.Int64s(conn.Do("EXEC")) reply, err := redis.Int64s(conn.Do("EXEC"))
if err != nil { if err != nil {
return err return err
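
Note: redigo's conn.Send only buffers a command client-side; real failures surface from conn.Do("EXEC"), whose error is checked. Discarding the Send errors explicitly is therefore safe and silences errcheck. A trimmed sketch of the pipeline (keys, field, and timestamp are placeholders, and a local Redis is assumed):

package main

import (
	"fmt"

	"github.com/gomodule/redigo/redis"
)

func putSeeder(conn redis.Conn) error {
	_ = conn.Send("MULTI")
	_ = conn.Send("HSET", "seeders:example", "peerkey", 1700000000)
	_ = conn.Send("HSET", "IPv4", "seeders:example", 1700000000)

	// EXEC reports any queued command that failed.
	reply, err := redis.Int64s(conn.Do("EXEC"))
	if err != nil {
		return err
	}
	fmt.Println(reply)
	return nil
}

func main() {
	conn, err := redis.Dial("tcp", "127.0.0.1:6379")
	if err != nil {
		panic(err)
	}
	defer conn.Close()
	if err := putSeeder(conn); err != nil {
		panic(err)
	}
}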
@@ -782,13 +784,13 @@ func (ps *peerStore) collectGarbage(cutoff time.Time) error {
// in other words, it's removed automatically after `HDEL` the last field. // in other words, it's removed automatically after `HDEL` the last field.
//_, err := conn.Do("DEL", ihStr) //_, err := conn.Do("DEL", ihStr)
conn.Send("MULTI") _ = conn.Send("MULTI")
conn.Send("HDEL", group, ihStr) _ = conn.Send("HDEL", group, ihStr)
if isSeeder { if isSeeder {
conn.Send("DECR", ps.infohashCountKey(group)) _ = conn.Send("DECR", ps.infohashCountKey(group))
} }
_, err = redis.Values(conn.Do("EXEC")) _, err = redis.Values(conn.Do("EXEC"))
if err != nil && err != redis.ErrNil { if err != nil && !errors.Is(err, redis.ErrNil) {
log.Error("storage: Redis EXEC failure", log.Fields{ log.Error("storage: Redis EXEC failure", log.Fields{
"group": group, "group": group,
"infohash": ihStr, "infohash": ihStr,
@@ -796,7 +798,7 @@ func (ps *peerStore) collectGarbage(cutoff time.Time) error {
}) })
} }
} else { } else {
if _, err = conn.Do("UNWATCH"); err != nil && err != redis.ErrNil { if _, err = conn.Do("UNWATCH"); err != nil && !errors.Is(err, redis.ErrNil) {
log.Error("storage: Redis UNWATCH failure", log.Fields{"error": err}) log.Error("storage: Redis UNWATCH failure", log.Fields{"error": err})
} }
} }

View file

@@ -23,7 +23,8 @@ func createNew() s.PeerStore {
RedisBroker: redisURL, RedisBroker: redisURL,
RedisReadTimeout: 10 * time.Second, RedisReadTimeout: 10 * time.Second,
RedisWriteTimeout: 10 * time.Second, RedisWriteTimeout: 10 * time.Second,
RedisConnectTimeout: 10 * time.Second}) RedisConnectTimeout: 10 * time.Second,
})
if err != nil { if err != nil {
panic(err) panic(err)
} }

View file

@@ -69,7 +69,7 @@ func (rc *redisConnector) NewPool() *redis.Pool {
}, },
// PINGs connections that have been idle more than 10 seconds // PINGs connections that have been idle more than 10 seconds
TestOnBorrow: func(c redis.Conn, t time.Time) error { TestOnBorrow: func(c redis.Conn, t time.Time) error {
if time.Since(t) < time.Duration(10*time.Second) { if time.Since(t) < 10*time.Second {
return nil return nil
} }
_, err := c.Do("PING") _, err := c.Do("PING")
@@ -80,7 +80,7 @@ func (rc *redisConnector) NewPool() *redis.Pool {
// Open a new Redis connection // Open a new Redis connection
func (rc *redisConnector) open() (redis.Conn, error) { func (rc *redisConnector) open() (redis.Conn, error) {
var opts = []redis.DialOption{ opts := []redis.DialOption{
redis.DialDatabase(rc.URL.DB), redis.DialDatabase(rc.URL.DB),
redis.DialReadTimeout(rc.ReadTimeout), redis.DialReadTimeout(rc.ReadTimeout),
redis.DialWriteTimeout(rc.WriteTimeout), redis.DialWriteTimeout(rc.WriteTimeout),
@@ -119,7 +119,7 @@ func parseRedisURL(target string) (*redisURL, error) {
return nil, errors.New("no redis scheme found") return nil, errors.New("no redis scheme found")
} }
db := 0 //default redis db db := 0 // default redis db
parts := strings.Split(u.Path, "/") parts := strings.Split(u.Path, "/")
if len(parts) != 1 { if len(parts) != 1 {
db, err = strconv.Atoi(parts[1]) db, err = strconv.Atoi(parts[1])
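
Note: the surrounding function derives the database index from the redis:// URL path, defaulting to 0 when no path is present. A hedged standalone version of just that parsing step:

package main

import (
	"errors"
	"fmt"
	"net/url"
	"strconv"
	"strings"
)

// parseDB extracts the database index from a redis:// URL,
// defaulting to 0 when the path is empty.
func parseDB(target string) (int, error) {
	u, err := url.Parse(target)
	if err != nil {
		return 0, err
	}
	if u.Scheme != "redis" {
		return 0, errors.New("no redis scheme found")
	}
	db := 0 // default redis db
	parts := strings.Split(u.Path, "/")
	if len(parts) != 1 {
		if db, err = strconv.Atoi(parts[1]); err != nil {
			return 0, err
		}
	}
	return db, nil
}

func main() {
	db, err := parseDB("redis://127.0.0.1:6379/2")
	fmt.Println(db, err) // 2 <nil>
}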

View file

@@ -53,8 +53,10 @@ func generatePeers() (a [1000]bittorrent.Peer) {
return return
} }
type executionFunc func(int, PeerStore, *benchData) error type (
type setupFunc func(PeerStore, *benchData) error executionFunc func(int, PeerStore, *benchData) error
setupFunc func(PeerStore, *benchData) error
)
func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef executionFunc) { func runBenchmark(b *testing.B, ps PeerStore, parallel bool, sf setupFunc, ef executionFunc) {
bd := &benchData{generateInfohashes(), generatePeers()} bd := &benchData{generateInfohashes(), generatePeers()}
@@ -185,6 +187,7 @@ func PutDelete1kInfohash(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error { runBenchmark(b, ps, false, nil, func(i int, ps PeerStore, bd *benchData) error {
err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0]) err := ps.PutSeeder(bd.infohashes[i%1000], bd.peers[0])
if err != nil { if err != nil {
return err
} }
return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0]) return ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
}) })
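
Note: the added `return err` fixes an actual bug, not a lint nit: the old code checked PutSeeder's error and then did nothing with it, leaving an empty if body, so a failed setup silently skewed the benchmark. The corrected closure shape, against a toy store:

package main

import (
	"errors"
	"fmt"
)

type store struct{ fail bool } // toy stand-in for a PeerStore

func (s store) PutSeeder() error {
	if s.fail {
		return errors.New("put failed")
	}
	return nil
}

func (s store) DeleteSeeder() error { return nil }

// iteration propagates the setup error instead of swallowing it.
func iteration(s store) error {
	if err := s.PutSeeder(); err != nil {
		return err // previously the if body was empty
	}
	return s.DeleteSeeder()
}

func main() {
	fmt.Println(iteration(store{fail: true}))  // put failed
	fmt.Println(iteration(store{fail: false})) // <nil>
}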
@@ -211,7 +214,7 @@ func PutDelete1kInfohash1k(b *testing.B, ps PeerStore) {
// DeleteNonexist can run in parallel. // DeleteNonexist can run in parallel.
func DeleteNonexist(b *testing.B, ps PeerStore) { func DeleteNonexist(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
ps.DeleteSeeder(bd.infohashes[0], bd.peers[0]) _ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[0])
return nil return nil
}) })
} }
@@ -222,7 +225,7 @@ func DeleteNonexist(b *testing.B, ps PeerStore) {
// DeleteNonexist can run in parallel. // DeleteNonexist can run in parallel.
func DeleteNonexist1k(b *testing.B, ps PeerStore) { func DeleteNonexist1k(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000]) _ = ps.DeleteSeeder(bd.infohashes[0], bd.peers[i%1000])
return nil return nil
}) })
} }
@@ -233,7 +236,7 @@ func DeleteNonexist1k(b *testing.B, ps PeerStore) {
// DeleteNonexist1kInfohash can run in parallel. // DeleteNonexist1kInfohash can run in parallel.
func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) { func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0]) _ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[0])
return nil return nil
}) })
} }
@@ -244,7 +247,7 @@ func DeleteNonexist1kInfohash(b *testing.B, ps PeerStore) {
// DeleteNonexist1kInfohash1k can run in parallel. // DeleteNonexist1kInfohash1k can run in parallel.
func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) { func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) _ = ps.DeleteSeeder(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
return nil return nil
}) })
} }
@@ -255,7 +258,7 @@ func DeleteNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
// GradNonexist can run in parallel. // GradNonexist can run in parallel.
func GradNonexist(b *testing.B, ps PeerStore) { func GradNonexist(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
ps.GraduateLeecher(bd.infohashes[0], bd.peers[0]) _ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[0])
return nil return nil
}) })
} }
@@ -266,7 +269,7 @@ func GradNonexist(b *testing.B, ps PeerStore) {
// GradNonexist1k can run in parallel. // GradNonexist1k can run in parallel.
func GradNonexist1k(b *testing.B, ps PeerStore) { func GradNonexist1k(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000]) _ = ps.GraduateLeecher(bd.infohashes[0], bd.peers[i%1000])
return nil return nil
}) })
} }
@@ -277,7 +280,7 @@ func GradNonexist1k(b *testing.B, ps PeerStore) {
// GradNonexist1kInfohash can run in parallel. // GradNonexist1kInfohash can run in parallel.
func GradNonexist1kInfohash(b *testing.B, ps PeerStore) { func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0]) _ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[0])
return nil return nil
}) })
} }
@@ -289,7 +292,7 @@ func GradNonexist1kInfohash(b *testing.B, ps PeerStore) {
// GradNonexist1kInfohash1k can run in parallel. // GradNonexist1kInfohash1k can run in parallel.
func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) { func GradNonexist1kInfohash1k(b *testing.B, ps PeerStore) {
runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error { runBenchmark(b, ps, true, nil, func(i int, ps PeerStore, bd *benchData) error {
ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000]) _ = ps.GraduateLeecher(bd.infohashes[i%1000], bd.peers[(i*3)%1000])
return nil return nil
}) })
} }