Merge pull request #68 from lbryio/modular_reflector
Some checks failed: Go / build (push) has been cancelled

Modular blobcache stores (huge changes)
Niko 2025-08-22 02:22:03 +02:00 committed by GitHub
commit 68c0fe3b97
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
59 changed files with 344016 additions and 1599 deletions


@@ -16,7 +16,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: 1.22.x
go-version: 1.23.x
- name: Build linux
run: make linux


@@ -15,7 +15,7 @@ jobs:
- name: Set up Go
uses: actions/setup-go@v4
with:
go-version: 1.22.x
go-version: 1.23.x
- name: Build linux
run: make linux

.gitignore (vendored, 8 changes)

@@ -2,3 +2,11 @@
/config.json*
/dist
/bin
blobcache.yaml
reflector.yaml
upload.yaml
remotereflector.yaml


@@ -3,7 +3,7 @@ dist: bionic
language: go
go:
- 1.22.x
- 1.23.x
cache:
directories:

cmd/blobcache.go (new file, 56 lines)

@@ -0,0 +1,56 @@
package cmd
import (
"os"
"os/signal"
"strconv"
"syscall"
"github.com/lbryio/reflector.go/config"
"github.com/lbryio/reflector.go/internal/metrics"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var ()
func init() {
var cmd = &cobra.Command{
Use: "blobcache",
Short: "Run blobcache server",
Run: blobcacheCmd,
}
cmd.Flags().IntVar(&metricsPort, "metrics-port", 2112, "The port reflector will use for prometheus metrics")
cmd.Flags().BoolVar(&disableBlocklist, "disable-blocklist", false, "Disable blocklist watching/updating")
rootCmd.AddCommand(cmd)
}
func blobcacheCmd(cmd *cobra.Command, args []string) {
store, err := config.LoadStores(conf, "blobcache")
if err != nil {
log.Fatal(err)
}
defer store.Shutdown()
servers, err := config.LoadServers(store, conf, "blobcache")
if err != nil {
log.Fatal(err)
}
for _, s := range servers {
err = s.Start()
if err != nil {
log.Fatal(err)
}
defer s.Shutdown()
}
metricsServer := metrics.NewServer(":"+strconv.Itoa(metricsPort), "/metrics")
metricsServer.Start()
defer metricsServer.Shutdown()
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, os.Interrupt, syscall.SIGTERM)
<-interruptChan
}
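
A rough sketch of how the config-driven wiring above can be exercised outside of cobra (not the canonical config format; the store factories and exact field names live in the store package and config/loader.go further down). The servers.peer keys below are assumptions based on how LoadServers unmarshals server.BlobServerConfig, and an in-memory store stands in for whatever the store: section of blobcache.yaml would normally produce.

package main

import (
	"log"
	"os"

	"github.com/lbryio/reflector.go/config"
	"github.com/lbryio/reflector.go/store"
)

// exampleYAML is a hypothetical minimal config: only the server type names
// ("peer", "http", "http3") come from config.LoadServers; the field names
// under peer are assumed from server.BlobServerConfig.
const exampleYAML = `
servers:
  peer:
    address: 0.0.0.0
    port: 5567
`

func main() {
	// LoadServers takes a config directory and a config name without extension.
	if err := os.WriteFile("blobcache.yaml", []byte(exampleYAML), 0o644); err != nil {
		log.Fatal(err)
	}
	blobStore := store.NewMemStore(store.MemParams{Name: "example"}) // stand-in for the configured store
	servers, err := config.LoadServers(blobStore, "./", "blobcache")
	if err != nil {
		log.Fatal(err)
	}
	for _, s := range servers {
		if err := s.Start(); err != nil {
			log.Fatal(err)
		}
		defer s.Shutdown() // a real command blocks on SIGINT/SIGTERM like blobcacheCmd above
	}
}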


@@ -5,7 +5,6 @@ import (
"os"
"time"
"github.com/lbryio/reflector.go/server/peer"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/stream"
@@ -28,11 +27,19 @@ func getStreamCmd(cmd *cobra.Command, args []string) {
addr := args[0]
sdHash := args[1]
s := store.NewCachingStore(
"getstream",
peer.NewStore(peer.StoreOpts{Address: addr}),
store.NewDiskStore("/tmp/lbry_downloaded_blobs", 2),
)
s := store.NewCachingStore(store.CachingParams{
Name: "getstream",
Cache: store.NewPeerStore(store.PeerParams{
Name: "getstream",
Address: addr,
Timeout: 30 * time.Second,
}),
Origin: store.NewDiskStore(store.DiskParams{
Name: "getstream",
MountPoint: "/tmp/lbry_downloaded_blobs",
ShardingSize: 2,
}),
})
wd, err := os.Getwd()
if err != nil {


@@ -1,55 +0,0 @@
package cmd
import (
"os"
"os/signal"
"strconv"
"syscall"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/server/peer"
"github.com/lbryio/reflector.go/store"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var peerNoDB bool
func init() {
var cmd = &cobra.Command{
Use: "peer",
Short: "Run peer server",
Run: peerCmd,
}
cmd.Flags().BoolVar(&peerNoDB, "nodb", false, "Don't connect to a db and don't use a db-backed blob store")
rootCmd.AddCommand(cmd)
}
func peerCmd(cmd *cobra.Command, args []string) {
var err error
s3 := store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName, globalConfig.S3Endpoint)
peerServer := peer.NewServer(s3)
if !peerNoDB {
db := &db.SQL{
LogQueries: log.GetLevel() == log.DebugLevel,
}
err = db.Connect(globalConfig.DBConn)
checkErr(err)
combo := store.NewDBBackedStore(s3, db, false)
peerServer = peer.NewServer(combo)
}
err = peerServer.Start(":" + strconv.Itoa(peer.DefaultPort))
if err != nil {
log.Fatal(err)
}
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, os.Interrupt, syscall.SIGTERM)
<-interruptChan
peerServer.Shutdown()
}


@@ -32,9 +32,9 @@ func populateDbCmd(cmd *cobra.Command, args []string) {
log.Fatal("store-path must be defined")
}
localDb := &db.SQL{
SoftDelete: true,
TrackAccess: db.TrackAccessBlobs,
LogQueries: log.GetLevel() == log.DebugLevel,
SoftDelete: true,
TrackingLevel: db.TrackAccessBlobs,
LogQueries: log.GetLevel() == log.DebugLevel,
}
err := localDb.Connect("reflector:reflector@tcp(localhost:3306)/reflector")
if err != nil {


@@ -4,378 +4,73 @@ import (
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"time"
"github.com/lbryio/lbry.go/v2/extras/util"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/meta"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/server/http"
"github.com/lbryio/reflector.go/server/http3"
"github.com/lbryio/reflector.go/server/peer"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/c2h5oh/datasize"
"github.com/lbryio/reflector.go/config"
"github.com/lbryio/reflector.go/internal/metrics"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
var (
//port configuration
tcpPeerPort int
http3PeerPort int
httpPeerPort int
receiverPort int
metricsPort int
receiverPort int
metricsPort int
//flags configuration
disableUploads bool
disableBlocklist bool
useDB bool
//upstream configuration
upstreamReflector string
upstreamProtocol string
upstreamEdgeToken string
//downstream configuration
requestQueueSize int
//upstream edge configuration (to "cold" storage)
originEndpoint string
originEndpointFallback string
//cache configuration
diskCache string
secondaryDiskCache string
memCache int
)
var cacheManagers = []string{"localdb", "lfu", "arc", "lru", "simple"}
var cacheMangerToGcache = map[string]store.EvictionStrategy{
"lfu": store.LFU,
"arc": store.ARC,
"lru": store.LRU,
"simple": store.SIMPLE,
}
func init() {
var cmd = &cobra.Command{
Use: "reflector",
Use: "reflector2",
Short: "Run reflector server",
Run: reflectorCmd,
Run: reflector2Cmd,
}
cmd.Flags().IntVar(&tcpPeerPort, "tcp-peer-port", 5567, "The port reflector will distribute content from for the TCP (LBRY) protocol")
cmd.Flags().IntVar(&http3PeerPort, "http3-peer-port", 5568, "The port reflector will distribute content from over HTTP3 protocol")
cmd.Flags().IntVar(&httpPeerPort, "http-peer-port", 5569, "The port reflector will distribute content from over HTTP protocol")
cmd.Flags().IntVar(&receiverPort, "receiver-port", 5566, "The port reflector will receive content from")
cmd.Flags().IntVar(&metricsPort, "metrics-port", 2112, "The port reflector will use for prometheus metrics")
cmd.Flags().BoolVar(&disableUploads, "disable-uploads", false, "Disable uploads to this reflector server")
cmd.Flags().IntVar(&receiverPort, "receiver-port", 5566, "The port reflector will receive content from")
cmd.Flags().BoolVar(&disableBlocklist, "disable-blocklist", false, "Disable blocklist watching/updating")
cmd.Flags().BoolVar(&useDB, "use-db", true, "Whether to connect to the reflector db or not")
cmd.Flags().StringVar(&upstreamReflector, "upstream-reflector", "", "host:port of a reflector server where blobs are fetched from")
cmd.Flags().StringVar(&upstreamProtocol, "upstream-protocol", "http", "protocol used to fetch blobs from another upstream reflector server (tcp/http3/http)")
cmd.Flags().StringVar(&upstreamEdgeToken, "upstream-edge-token", "", "token used to retrieve/authenticate protected content")
cmd.Flags().IntVar(&requestQueueSize, "request-queue-size", 200, "How many concurrent requests from downstream should be handled at once (the rest will wait)")
cmd.Flags().StringVar(&originEndpoint, "origin-endpoint", "", "HTTP edge endpoint for standard HTTP retrieval")
cmd.Flags().StringVar(&originEndpointFallback, "origin-endpoint-fallback", "", "HTTP edge endpoint for standard HTTP retrieval if first origin fails")
cmd.Flags().StringVar(&diskCache, "disk-cache", "100GB:/tmp/downloaded_blobs:localdb", "Where to cache blobs on the file system. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfu/arc/lru)")
cmd.Flags().StringVar(&secondaryDiskCache, "optional-disk-cache", "", "Optional secondary file system cache for blobs. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfu/arc/lru) (this would get hit before the one specified in disk-cache)")
cmd.Flags().IntVar(&memCache, "mem-cache", 0, "enable in-memory cache with a max size of this many blobs")
rootCmd.AddCommand(cmd)
}
func reflectorCmd(cmd *cobra.Command, args []string) {
log.Printf("reflector %s", meta.VersionString())
func reflector2Cmd(cmd *cobra.Command, args []string) {
store, err := config.LoadStores(conf, "reflector")
if err != nil {
log.Fatal(err)
}
defer store.Shutdown()
// the blocklist logic requires the db backed store to be the outer-most store
underlyingStore := initStores()
underlyingStoreWithCaches, cleanerStopper := initCaches(underlyingStore)
if !disableUploads {
reflectorServer := reflector.NewServer(underlyingStore, underlyingStoreWithCaches)
reflectorServer.Timeout = 3 * time.Minute
reflectorServer.EnableBlocklist = !disableBlocklist
err := reflectorServer.Start(":" + strconv.Itoa(receiverPort))
servers, err := config.LoadServers(store, conf, "reflector")
if err != nil {
log.Fatal(err)
}
for _, s := range servers {
err = s.Start()
if err != nil {
log.Fatal(err)
}
defer reflectorServer.Shutdown()
defer s.Shutdown()
}
peerServer := peer.NewServer(underlyingStoreWithCaches)
err := peerServer.Start(":" + strconv.Itoa(tcpPeerPort))
reflectorServer := reflector.NewIngestionServer(store)
reflectorServer.Timeout = 3 * time.Minute
reflectorServer.EnableBlocklist = !disableBlocklist
err = reflectorServer.Start(":" + strconv.Itoa(receiverPort))
if err != nil {
log.Fatal(err)
}
defer peerServer.Shutdown()
http3PeerServer := http3.NewServer(underlyingStoreWithCaches, requestQueueSize)
err = http3PeerServer.Start(":" + strconv.Itoa(http3PeerPort))
if err != nil {
log.Fatal(err)
}
defer http3PeerServer.Shutdown()
httpServer := http.NewServer(store.WithSingleFlight("sf-http", underlyingStoreWithCaches), requestQueueSize, upstreamEdgeToken)
err = httpServer.Start(":" + strconv.Itoa(httpPeerPort))
if err != nil {
log.Fatal(err)
}
defer httpServer.Shutdown()
defer reflectorServer.Shutdown()
metricsServer := metrics.NewServer(":"+strconv.Itoa(metricsPort), "/metrics")
metricsServer.Start()
defer metricsServer.Shutdown()
defer underlyingStoreWithCaches.Shutdown()
defer underlyingStore.Shutdown() //do we actually need this? Oo
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, os.Interrupt, syscall.SIGTERM)
<-interruptChan
// deferred shutdowns happen now
cleanerStopper.StopAndWait()
}
func initUpstreamStore() store.BlobStore {
var s store.BlobStore
if upstreamReflector == "" {
return nil
}
switch upstreamProtocol {
case "tcp":
s = peer.NewStore(peer.StoreOpts{
Address: upstreamReflector,
Timeout: 30 * time.Second,
})
case "http3":
s = http3.NewStore(http3.StoreOpts{
Address: upstreamReflector,
Timeout: 30 * time.Second,
})
case "http":
s = store.NewHttpStore(upstreamReflector, upstreamEdgeToken)
default:
log.Fatalf("protocol is not recognized: %s", upstreamProtocol)
}
return s
}
func initEdgeStore() store.BlobStore {
var s3Store *store.S3Store
var s store.BlobStore
if conf != "none" {
s3Store = store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName, globalConfig.S3Endpoint)
}
if originEndpointFallback != "" && originEndpoint != "" {
ittt := store.NewITTTStore(store.NewCloudFrontROStore(originEndpoint), store.NewCloudFrontROStore(originEndpointFallback))
if s3Store != nil {
s = store.NewCloudFrontRWStore(ittt, s3Store)
} else {
s = ittt
}
} else if s3Store != nil {
s = s3Store
} else {
log.Fatalf("this configuration does not include a valid upstream source")
}
return s
}
func initDBStore(s store.BlobStore) store.BlobStore {
if useDB {
dbInst := &db.SQL{
TrackAccess: db.TrackAccessStreams,
LogQueries: log.GetLevel() == log.DebugLevel,
}
err := dbInst.Connect(globalConfig.DBConn)
if err != nil {
log.Fatal(err)
}
s = store.NewDBBackedStore(s, dbInst, false)
}
return s
}
func initStores() store.BlobStore {
s := initUpstreamStore()
if s == nil {
s = initEdgeStore()
}
s = initDBStore(s)
return s
}
// initCaches returns a store wrapped with caches and a stop group to execute a clean shutdown
func initCaches(s store.BlobStore) (store.BlobStore, *stop.Group) {
stopper := stop.New()
diskStore := initDiskStore(s, diskCache, stopper)
finalStore := initDiskStore(diskStore, secondaryDiskCache, stopper)
stop.New()
if memCache > 0 {
finalStore = store.NewCachingStore(
"reflector",
finalStore,
store.NewGcacheStore("mem", store.NewMemStore(), memCache, store.LRU),
)
}
return finalStore, stopper
}
func initDiskStore(upstreamStore store.BlobStore, diskParams string, stopper *stop.Group) store.BlobStore {
diskCacheMaxSize, diskCachePath, cacheManager := diskCacheParams(diskParams)
//we are tracking blobs in memory with a 1 byte long boolean, which means that for each 2MB (a blob) we need 1Byte
// so if the underlying cache holds 10MB, 10MB/2MB=5Bytes which is also the exact count of objects to restore on startup
realCacheSize := float64(diskCacheMaxSize) / float64(stream.MaxBlobSize)
if diskCacheMaxSize == 0 {
return upstreamStore
}
err := os.MkdirAll(diskCachePath, os.ModePerm)
if err != nil {
log.Fatal(err)
}
diskStore := store.NewDiskStore(diskCachePath, 2)
var unwrappedStore store.BlobStore
cleanerStopper := stop.New(stopper)
if cacheManager == "localdb" {
localDb := &db.SQL{
SoftDelete: true,
TrackAccess: db.TrackAccessBlobs,
LogQueries: log.GetLevel() == log.DebugLevel,
}
err = localDb.Connect("reflector:reflector@tcp(localhost:3306)/reflector")
if err != nil {
log.Fatal(err)
}
unwrappedStore = store.NewDBBackedStore(diskStore, localDb, true)
go cleanOldestBlobs(int(realCacheSize), localDb, unwrappedStore, cleanerStopper)
} else {
unwrappedStore = store.NewGcacheStore("nvme", store.NewDiskStore(diskCachePath, 2), int(realCacheSize), cacheMangerToGcache[cacheManager])
}
wrapped := store.NewCachingStore(
"reflector",
upstreamStore,
unwrappedStore,
)
return wrapped
}
func diskCacheParams(diskParams string) (int, string, string) {
if diskParams == "" {
return 0, "", ""
}
parts := strings.Split(diskParams, ":")
if len(parts) != 3 {
log.Fatalf("%s does is formatted incorrectly. Expected format: 'sizeGB:CACHE_PATH:cachemanager' for example: '100GB:/tmp/downloaded_blobs:localdb'", diskParams)
}
diskCacheSize := parts[0]
path := parts[1]
cacheManager := parts[2]
if len(path) == 0 || path[0] != '/' {
log.Fatalf("disk cache paths must start with '/'")
}
if !util.InSlice(cacheManager, cacheManagers) {
log.Fatalf("specified cache manager '%s' is not supported. Use one of the following: %v", cacheManager, cacheManagers)
}
var maxSize datasize.ByteSize
err := maxSize.UnmarshalText([]byte(diskCacheSize))
if err != nil {
log.Fatal(err)
}
if maxSize <= 0 {
log.Fatal("disk cache size must be more than 0")
}
return int(maxSize), path, cacheManager
}
func cleanOldestBlobs(maxItems int, db *db.SQL, store store.BlobStore, stopper *stop.Group) {
// this is so that it runs on startup without having to wait for 10 minutes
err := doClean(maxItems, db, store, stopper)
if err != nil {
log.Error(errors.FullTrace(err))
}
const cleanupInterval = 10 * time.Minute
for {
select {
case <-stopper.Ch():
log.Infoln("stopping self cleanup")
return
case <-time.After(cleanupInterval):
err := doClean(maxItems, db, store, stopper)
if err != nil {
log.Error(errors.FullTrace(err))
}
}
}
}
func doClean(maxItems int, db *db.SQL, store store.BlobStore, stopper *stop.Group) error {
blobsCount, err := db.Count()
if err != nil {
return err
}
if blobsCount >= maxItems {
itemsToDelete := blobsCount / 10
blobs, err := db.LeastRecentlyAccessedHashes(itemsToDelete)
if err != nil {
return err
}
blobsChan := make(chan string, len(blobs))
wg := &stop.Group{}
go func() {
for _, hash := range blobs {
select {
case <-stopper.Ch():
return
default:
}
blobsChan <- hash
}
close(blobsChan)
}()
for i := 0; i < 3; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for h := range blobsChan {
select {
case <-stopper.Ch():
return
default:
}
err = store.Delete(h)
if err != nil {
log.Errorf("error pruning %s: %s", h, errors.FullTrace(err))
continue
}
}
}()
}
wg.Wait()
}
return nil
}


@@ -1,17 +1,14 @@
package cmd
import (
"encoding/json"
"os"
"strings"
"github.com/lbryio/reflector.go/updater"
"github.com/spf13/viper"
"github.com/lbryio/lbry.go/v2/dht"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/util"
"github.com/johntdyer/slackrus"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
@@ -39,7 +36,8 @@ const (
)
var conf string
var globalConfig Config
//var globalConfig Config
var rootCmd = &cobra.Command{
Use: "prism",
@@ -52,7 +50,7 @@ var rootCmd = &cobra.Command{
func init() {
rootCmd.PersistentFlags().StringSliceVarP(&verbose, "verbose", "v", []string{}, "Verbose logging for specific components")
rootCmd.PersistentFlags().StringVar(&conf, "conf", "config.json", "Path to config. Use 'none' to disable")
rootCmd.PersistentFlags().StringVar(&conf, "conf-dir", "./", "Path to config directory")
}
// Execute adds all child commands to the root command and sets flags appropriately.
@@ -87,38 +85,40 @@ func preRun(cmd *cobra.Command, args []string) {
}
}
var err error
//var err error
if conf == "" {
logrus.Errorln("--conf flag required")
os.Exit(1)
} else if conf != "none" {
globalConfig, err = loadConfig(conf)
if err != nil {
logrus.Error(err)
os.Exit(1)
}
}
//else if conf != "none" {
// globalConfig, err = loadConfig(conf)
// if err != nil {
// logrus.Error(err)
// os.Exit(1)
// }
//}
viper.AddConfigPath(conf)
if globalConfig.SlackHookURL != "" {
hook := &slackrus.SlackrusHook{
HookURL: globalConfig.SlackHookURL,
AcceptedLevels: slackrus.LevelThreshold(logrus.InfoLevel),
Channel: globalConfig.SlackChannel,
//IconEmoji: ":ghost:",
//Username: "reflector.go",
}
//logrus.SetFormatter(&logrus.JSONFormatter{})
logrus.AddHook(hook)
debugLogger.AddHook(hook)
}
//if globalConfig.SlackHookURL != "" {
// hook := &slackrus.SlackrusHook{
// HookURL: globalConfig.SlackHookURL,
// AcceptedLevels: slackrus.LevelThreshold(logrus.InfoLevel),
// Channel: globalConfig.SlackChannel,
// //IconEmoji: ":ghost:",
// //Username: "reflector.go",
// }
// //logrus.SetFormatter(&logrus.JSONFormatter{})
// logrus.AddHook(hook)
// debugLogger.AddHook(hook)
//}
if globalConfig.UpdateBinURL != "" {
if globalConfig.UpdateCmd == "" {
logrus.Warnln("update_cmd is empty in conf file")
}
logrus.Println("starting update checker")
go updater.Run(globalConfig.UpdateBinURL, globalConfig.UpdateCmd)
}
//if globalConfig.UpdateBinURL != "" {
// if globalConfig.UpdateCmd == "" {
// logrus.Warnln("update_cmd is empty in conf file")
// }
// logrus.Println("starting update checker")
// go updater.Run(globalConfig.UpdateBinURL, globalConfig.UpdateCmd)
//}
}
func checkErr(err error) {
@@ -139,20 +139,20 @@ func argFuncs(funcs ...cobra.PositionalArgs) cobra.PositionalArgs {
}
}
func loadConfig(path string) (Config, error) {
var c Config
raw, err := os.ReadFile(path)
if err != nil {
if os.IsNotExist(err) {
return c, errors.Err("config file not found")
}
return c, errors.Err(err)
}
err = json.Unmarshal(raw, &c)
return c, errors.Err(err)
}
//func loadConfig(path string) (Config, error) {
// var c Config
//
// raw, err := os.ReadFile(path)
// if err != nil {
// if os.IsNotExist(err) {
// return c, errors.Err("config file not found")
// }
// return c, errors.Err(err)
// }
//
// err = json.Unmarshal(raw, &c)
// return c, errors.Err(err)
//}
func mustGetFlagString(cmd *cobra.Command, name string) string {
v, err := cmd.Flags().GetString(name)


@@ -1,102 +0,0 @@
package cmd
import (
"os"
"os/signal"
"strconv"
"strings"
"syscall"
"github.com/lbryio/reflector.go/cluster"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/prism"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/server/peer"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/dht"
"github.com/lbryio/lbry.go/v2/dht/bits"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
)
const (
startNewCluster = "new"
)
var (
startClusterPort int
startPeerPort int
startReflectorPort int
startDhtPort int
startDhtSeeds []string
startHashRange string
)
func init() {
var cmd = &cobra.Command{
Use: `start [cluster-address|"new"]`,
Short: "Runs full prism application with cluster, dht, peer server, and reflector server.",
Run: startCmd,
Args: cobra.ExactArgs(1),
}
cmd.PersistentFlags().IntVar(&startClusterPort, "cluster-port", cluster.DefaultPort, "Port that cluster listens on")
cmd.PersistentFlags().IntVar(&startPeerPort, "peer-port", peer.DefaultPort, "Port to start peer protocol on")
cmd.PersistentFlags().IntVar(&startReflectorPort, "reflector-port", reflector.DefaultPort, "Port to start reflector protocol on")
cmd.PersistentFlags().IntVar(&startDhtPort, "dht-port", dht.DefaultPort, "Port that dht will listen on")
cmd.PersistentFlags().StringSliceVar(&startDhtSeeds, "dht-seeds", []string{}, "Comma-separated list of dht seed nodes (addr:port,addr:port,...)")
cmd.PersistentFlags().StringVar(&startHashRange, "hash-range", "", "Limit on range of hashes to announce (start-end)")
rootCmd.AddCommand(cmd)
}
func startCmd(cmd *cobra.Command, args []string) {
db := &db.SQL{
LogQueries: log.GetLevel() == log.DebugLevel,
}
err := db.Connect(globalConfig.DBConn)
checkErr(err)
s3 := store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName, globalConfig.S3Endpoint)
comboStore := store.NewDBBackedStore(s3, db, false)
conf := prism.DefaultConf()
// TODO: args we need:
// minNodes - minimum number of nodes before announcing starts. otherwise first node will try to announce all the blobs in the db
// or maybe we should do maxHashesPerNode?
// in either case, this should not kill the cluster, but should only limit announces (and notify when some hashes are being left unannounced)
if args[0] != startNewCluster {
conf.ClusterSeedAddr = args[0]
}
conf.DB = db
conf.Blobs = comboStore
conf.DhtAddress = "0.0.0.0:" + strconv.Itoa(startDhtPort)
conf.DhtSeedNodes = startDhtSeeds
conf.ClusterPort = startClusterPort
conf.PeerPort = startPeerPort
conf.ReflectorPort = startReflectorPort
if startHashRange != "" {
hashRange := strings.Split(startHashRange, "-")
if len(hashRange) != 2 {
log.Fatal("invalid hash range")
}
r := bits.Range{Start: bits.FromShortHexP(hashRange[0]), End: bits.FromShortHexP(hashRange[1])}
conf.HashRange = &r
}
p := prism.New(conf)
err = p.Start()
if err != nil {
log.Fatal(err)
}
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, os.Interrupt, syscall.SIGTERM)
<-interruptChan
p.Shutdown()
}


@@ -29,9 +29,9 @@ func init() {
func testCmd(cmd *cobra.Command, args []string) {
log.Printf("reflector %s", meta.VersionString())
memStore := store.NewMemStore()
memStore := store.NewMemStore(store.MemParams{Name: "test"})
reflectorServer := reflector.NewServer(memStore, memStore)
reflectorServer := reflector.NewIngestionServer(memStore)
reflectorServer.Timeout = 3 * time.Minute
err := reflectorServer.Start(":" + strconv.Itoa(reflector.DefaultPort))
@@ -39,8 +39,8 @@ func testCmd(cmd *cobra.Command, args []string) {
log.Fatal(err)
}
peerServer := peer.NewServer(memStore)
err = peerServer.Start(":" + strconv.Itoa(reflector.DefaultPort+1))
peerServer := peer.NewServer(memStore, fmt.Sprintf(":%d", reflector.DefaultPort+1))
err = peerServer.Start()
if err != nil {
log.Fatal(err)
}


@@ -5,9 +5,9 @@ import (
"os/signal"
"syscall"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/config"
"github.com/lbryio/reflector.go/reflector"
"github.com/lbryio/reflector.go/store"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
@@ -31,17 +31,18 @@
}
func uploadCmd(cmd *cobra.Command, args []string) {
db := &db.SQL{
LogQueries: log.GetLevel() == log.DebugLevel,
store, err := config.LoadStores(conf, "upload")
if err != nil {
log.Fatal(err)
}
err := db.Connect(globalConfig.DBConn)
checkErr(err)
defer store.Shutdown()
st := store.NewDBBackedStore(
store.NewS3Store(globalConfig.AwsID, globalConfig.AwsSecret, globalConfig.BucketRegion, globalConfig.BucketName, globalConfig.S3Endpoint),
db, false)
databaseConn, err := config.LoadDatabase(conf, "upload")
if err != nil {
log.Fatal(err)
}
uploader := reflector.NewUploader(db, st, uploadWorkers, uploadSkipExistsCheck, uploadDeleteBlobsAfterUpload)
uploader := reflector.NewUploader(databaseConn, store, uploadWorkers, uploadSkipExistsCheck, uploadDeleteBlobsAfterUpload)
interruptChan := make(chan os.Signal, 1)
signal.Notify(interruptChan, os.Interrupt, syscall.SIGTERM)

config/loader.go (new file, 114 lines)

@@ -0,0 +1,114 @@
package config
import (
"fmt"
log "github.com/sirupsen/logrus"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/server"
"github.com/lbryio/reflector.go/server/http"
"github.com/lbryio/reflector.go/server/http3"
"github.com/lbryio/reflector.go/server/peer"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/spf13/viper"
)
func LoadStores(path, file string) (store.BlobStore, error) {
v := viper.New()
v.SetConfigType("yaml")
v.AddConfigPath(path)
v.SetConfigName(file)
err := v.ReadInConfig()
if err != nil {
return nil, errors.Err(err)
}
storeViper := v.Sub("store")
for storeType := range storeViper.AllSettings() {
factory, exists := store.Factories[storeType]
if !exists {
return nil, errors.Err("unknown store type: %s", storeType)
}
storeConfig := storeViper.Sub(storeType)
s, err := factory(storeConfig)
if err != nil {
return nil, errors.Err(err)
}
//we only expect 1 store as the root, so let's return it
return s, nil
}
return nil, nil
}
func LoadServers(store store.BlobStore, path, file string) ([]server.BlobServer, error) {
v := viper.New()
v.SetConfigType("yaml")
v.AddConfigPath(path)
v.SetConfigName(file)
err := v.ReadInConfig()
if err != nil {
return nil, errors.Err(err)
}
servers := make([]server.BlobServer, 0)
serversViper := v.Sub("servers")
for serverType := range serversViper.AllSettings() {
var cfg server.BlobServerConfig
err := serversViper.Sub(serverType).Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
switch serverType {
case "http":
servers = append(servers, http.NewServer(store, cfg.MaxConcurrentRequests, cfg.EdgeToken, fmt.Sprintf("%s:%d", cfg.Address, cfg.Port)))
case "http3":
servers = append(servers, http3.NewServer(store, cfg.MaxConcurrentRequests, fmt.Sprintf("%s:%d", cfg.Address, cfg.Port)))
case "peer":
servers = append(servers, peer.NewServer(store, fmt.Sprintf("%s:%d", cfg.Address, cfg.Port)))
default:
return nil, errors.Err("unknown server type: %s", serverType)
}
}
return servers, nil
}
func LoadDatabase(path, file string) (*db.SQL, error) {
v := viper.New()
v.SetConfigType("yaml")
v.AddConfigPath(path)
v.SetConfigName(file)
err := v.ReadInConfig()
if err != nil {
return nil, errors.Err(err)
}
dbConfig := v.Sub("database")
if dbConfig == nil {
return nil, errors.Err("db config not found")
}
user := dbConfig.GetString("user")
password := dbConfig.GetString("password")
host := dbConfig.GetString("host")
port := dbConfig.GetInt("port")
database := dbConfig.GetString("database")
logQueries := dbConfig.GetBool("log_queries")
accessTracking := dbConfig.GetInt("access_tracking")
if user == "" || password == "" || host == "" || port == 0 || database == "" {
return nil, errors.Err("db config is missing required fields")
}
dsn := fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", user, password, host, port, database)
dbInstance := &db.SQL{
TrackingLevel: db.AccessTrackingLevel(accessTracking),
SoftDelete: true,
LogQueries: logQueries || log.GetLevel() == log.DebugLevel,
}
err = dbInstance.Connect(dsn)
return dbInstance, err
}
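
The database: keys read by LoadDatabase are explicit above (user, password, host, port, database, log_queries, access_tracking). A minimal sketch of feeding it an upload.yaml-style section follows; the credentials are placeholders, and since LoadDatabase calls Connect on the built DSN, expect an error unless a matching MySQL server is actually reachable.

package main

import (
	"log"
	"os"

	"github.com/lbryio/reflector.go/config"
)

// exampleDB uses placeholder credentials; access_tracking: 2 corresponds to
// db.TrackAccessBlobs (0 = none, 1 = streams, 2 = blobs).
const exampleDB = `
database:
  user: reflector
  password: reflector
  host: localhost
  port: 3306
  database: reflector
  log_queries: false
  access_tracking: 2
`

func main() {
	if err := os.WriteFile("upload.yaml", []byte(exampleDB), 0o644); err != nil {
		log.Fatal(err)
	}
	// LoadDatabase builds the DSN, sets TrackingLevel/SoftDelete/LogQueries,
	// and connects, so this fails fast without a reachable MySQL instance.
	dbConn, err := config.LoadDatabase("./", "upload")
	if err != nil {
		log.Fatal(err)
	}
	_ = dbConn // consumed like uploadCmd does: reflector.NewUploader(dbConn, ...)
}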


@@ -35,12 +35,12 @@ type SdBlob struct {
StreamHash string `json:"stream_hash"`
}
type trackAccess int
type AccessTrackingLevel int
const (
TrackAccessNone trackAccess = iota // Don't track accesses
TrackAccessStreams // Track accesses at the stream level
TrackAccessBlobs // Track accesses at the blob level
TrackAccessNone AccessTrackingLevel = iota // Don't track accesses
TrackAccessStreams // Track accesses at the stream level
TrackAccessBlobs // Track accesses at the blob level
)
// SQL implements the DB interface
@@ -48,7 +48,7 @@ type SQL struct {
conn *sql.DB
// Track the approx last time a blob or stream was accessed
TrackAccess trackAccess
TrackingLevel AccessTrackingLevel
// Instead of deleting a blob, marked it as not stored in the db
SoftDelete bool
@@ -57,7 +57,7 @@
LogQueries bool
}
func (s SQL) logQuery(query string, args ...interface{}) {
func (s *SQL) logQuery(query string, args ...interface{}) {
if !s.LogQueries {
return
}
@@ -179,7 +179,7 @@ func (s *SQL) insertBlob(hash string, length int, isStored bool) (int64, error)
q string
args []interface{}
)
if s.TrackAccess == TrackAccessBlobs {
if s.TrackingLevel == TrackAccessBlobs {
args = []interface{}{hash, isStored, length, time.Now()}
q = "INSERT INTO blob_ (hash, is_stored, length, last_accessed_at) VALUES (" + qt.Qs(len(args)) + ") ON DUPLICATE KEY UPDATE is_stored = (is_stored or VALUES(is_stored)), last_accessed_at = VALUES(last_accessed_at)"
} else {
@@ -201,7 +201,7 @@ func (s *SQL) insertBlob(hash string, length int, isStored bool) (int64, error)
return 0, errors.Err("blob ID is 0 even after INSERTing and SELECTing")
}
if s.TrackAccess == TrackAccessBlobs {
if s.TrackingLevel == TrackAccessBlobs {
err := s.touchBlobs([]uint64{uint64(blobID)})
if err != nil {
return 0, errors.Err(err)
@@ -218,7 +218,7 @@ func (s *SQL) insertStream(hash string, sdBlobID int64) (int64, error) {
args []interface{}
)
if s.TrackAccess == TrackAccessStreams {
if s.TrackingLevel == TrackAccessStreams {
args = []interface{}{hash, sdBlobID, time.Now()}
q = "INSERT IGNORE INTO stream (hash, sd_blob_id, last_accessed_at) VALUES (" + qt.Qs(len(args)) + ")"
} else {
@@ -240,7 +240,7 @@ func (s *SQL) insertStream(hash string, sdBlobID int64) (int64, error) {
return 0, errors.Err("stream ID is 0 even after INSERTing and SELECTing")
}
if s.TrackAccess == TrackAccessStreams {
if s.TrackingLevel == TrackAccessStreams {
err := s.touchStreams([]uint64{uint64(streamID)})
if err != nil {
return 0, errors.Err(err)
@@ -264,9 +264,9 @@ func (s *SQL) HasBlobs(hashes []string, touch bool) (map[string]bool, error) {
exists, idsNeedingTouch, err := s.hasBlobs(hashes)
if touch {
if s.TrackAccess == TrackAccessBlobs {
if s.TrackingLevel == TrackAccessBlobs {
_ = s.touchBlobs(idsNeedingTouch)
} else if s.TrackAccess == TrackAccessStreams {
} else if s.TrackingLevel == TrackAccessStreams {
_ = s.touchStreams(idsNeedingTouch)
}
}
@@ -338,11 +338,11 @@ func (s *SQL) hasBlobs(hashes []string) (map[string]bool, []uint64, error) {
batch := hashes[doneIndex:sliceEnd]
var query string
if s.TrackAccess == TrackAccessBlobs {
if s.TrackingLevel == TrackAccessBlobs {
query = `SELECT b.hash, b.id, NULL, b.last_accessed_at
FROM blob_ b
WHERE b.is_stored = 1 and b.hash IN (` + qt.Qs(len(batch)) + `)`
} else if s.TrackAccess == TrackAccessStreams {
} else if s.TrackingLevel == TrackAccessStreams {
query = `SELECT b.hash, b.id, s.id, s.last_accessed_at
FROM blob_ b
LEFT JOIN stream_blob sb ON b.id = sb.blob_id
@@ -377,9 +377,9 @@ WHERE b.is_stored = 1 and b.hash IN (` + qt.Qs(len(batch)) + `)`
}
exists[hash] = true
if !lastAccessedAt.Valid || lastAccessedAt.Time.Before(touchDeadline) {
if s.TrackAccess == TrackAccessBlobs {
if s.TrackingLevel == TrackAccessBlobs {
needsTouch = append(needsTouch, blobID)
} else if s.TrackAccess == TrackAccessStreams && !streamID.IsZero() {
} else if s.TrackingLevel == TrackAccessStreams && !streamID.IsZero() {
needsTouch = append(needsTouch, streamID.Uint64)
}
}
@@ -424,7 +424,7 @@ func (s *SQL) LeastRecentlyAccessedHashes(maxBlobs int) ([]string, error) {
return nil, errors.Err("not connected")
}
if s.TrackAccess != TrackAccessBlobs {
if s.TrackingLevel != TrackAccessBlobs {
return nil, errors.Err("blob access tracking is disabled")
}

go.mod (123 changes)

@@ -1,15 +1,17 @@
module github.com/lbryio/reflector.go
go 1.22
go 1.23.0
toolchain go1.23.6
replace github.com/btcsuite/btcd => github.com/lbryio/lbrycrd.go v0.0.0-20200203050410-e1076f12bf19
require (
github.com/aws/aws-sdk-go v1.55.5
github.com/aws/aws-sdk-go v1.55.6
github.com/bluele/gcache v0.0.2
github.com/brk0v/directio v0.0.0-20190225130936-69406e757cf7
github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d
github.com/brk0v/directio v0.0.0-20241105172640-ae9d82eb8fee
github.com/btcsuite/btcd v0.24.2
github.com/btcsuite/btcutil v1.0.2
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
github.com/ekyoung/gin-nice-recovery v0.0.0-20160510022553-1654dca486db
@@ -19,112 +21,107 @@ require (
github.com/golang/protobuf v1.5.4
github.com/google/gops v0.3.28
github.com/gorilla/mux v1.8.1
github.com/hashicorp/serf v0.10.1
github.com/hashicorp/serf v0.10.2
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf
github.com/johntdyer/slackrus v0.0.0-20230315191314-80bc92dee4fc
github.com/karrick/godirwalk v1.17.0
github.com/lbryio/chainquery v1.9.1-0.20240927170248-48c092515dea
github.com/lbryio/lbry.go/v2 v2.7.2-0.20230307181431-a01aa6dc0629
github.com/lbryio/types v0.0.0-20220224142228-73610f6654a6
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5
github.com/prometheus/client_golang v1.20.5
github.com/quic-go/quic-go v0.48.2
github.com/quic-go/quic-go v0.49.0
github.com/sirupsen/logrus v1.9.3
github.com/spf13/cast v1.6.0
github.com/spf13/cobra v1.8.0
github.com/stretchr/testify v1.9.0
github.com/spf13/cast v1.7.1
github.com/spf13/cobra v1.9.1
github.com/spf13/viper v1.20.1
github.com/stretchr/testify v1.10.0
github.com/volatiletech/null/v8 v8.1.2
go.uber.org/atomic v1.11.0
golang.org/x/sync v0.8.0
golang.org/x/sync v0.11.0
)
require (
filippo.io/edwards25519 v1.1.0 // indirect
github.com/armon/go-metrics v0.4.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect
github.com/btcsuite/btclog v1.0.0 // indirect
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd // indirect
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 // indirect
github.com/bytedance/sonic v1.11.6 // indirect
github.com/bytedance/sonic/loader v0.1.1 // indirect
github.com/bytedance/sonic v1.12.8 // indirect
github.com/bytedance/sonic/loader v0.2.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cloudwego/base64x v0.1.4 // indirect
github.com/cloudwego/iasm v0.2.0 // indirect
github.com/cloudwego/base64x v0.1.5 // indirect
github.com/friendsofgo/errors v0.9.2 // indirect
github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.3 // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-errors/errors v1.4.2 // indirect
github.com/fsnotify/fsnotify v1.8.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.8 // indirect
github.com/gin-contrib/sse v1.0.0 // indirect
github.com/go-errors/errors v1.5.1 // indirect
github.com/go-ini/ini v1.67.0 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.20.0 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/gofrs/uuid v4.2.0+incompatible // indirect
github.com/google/btree v1.0.0 // indirect
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd // indirect
github.com/go-playground/validator/v10 v10.24.0 // indirect
github.com/go-task/slim-sprig/v3 v3.0.0 // indirect
github.com/go-viper/mapstructure/v2 v2.2.1 // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/gofrs/uuid v4.4.0+incompatible // indirect
github.com/google/btree v1.1.2 // indirect
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 // indirect
github.com/gorilla/rpc v1.2.0 // indirect
github.com/gorilla/websocket v1.5.1 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-immutable-radix v1.3.1 // indirect
github.com/hashicorp/go-msgpack v0.5.3 // indirect
github.com/hashicorp/go-metrics v0.5.4 // indirect
github.com/hashicorp/go-msgpack/v2 v2.1.2 // indirect
github.com/hashicorp/go-multierror v1.1.1 // indirect
github.com/hashicorp/go-sockaddr v1.0.0 // indirect
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/hashicorp/memberlist v0.5.0 // indirect
github.com/hashicorp/go-sockaddr v1.0.5 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
github.com/hashicorp/memberlist v0.5.2 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/johntdyer/slack-go v0.0.0-20230314151037-c5bf334f9b6e // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.9 // indirect
github.com/klauspost/cpuid/v2 v2.2.7 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/klauspost/cpuid/v2 v2.2.9 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/miekg/dns v1.1.41 // indirect
github.com/miekg/dns v1.1.56 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/onsi/ginkgo/v2 v2.22.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/common v0.62.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/quic-go/qpack v0.5.1 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sagikazarmark/locafero v0.7.0 // indirect
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
github.com/shopspring/decimal v1.3.1 // indirect
github.com/slack-go/slack v0.12.1 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/slack-go/slack v0.16.0 // indirect
github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.18.2 // indirect
github.com/spf13/afero v1.12.0 // indirect
github.com/spf13/pflag v1.0.6 // indirect
github.com/subosito/gotenv v1.6.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
github.com/volatiletech/inflect v0.0.1 // indirect
github.com/volatiletech/randomize v0.0.1 // indirect
github.com/volatiletech/strmangle v0.0.6 // indirect
go.uber.org/mock v0.4.0 // indirect
go.uber.org/multierr v1.9.0 // indirect
golang.org/x/arch v0.8.0 // indirect
golang.org/x/crypto v0.26.0 // indirect
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect
golang.org/x/mod v0.17.0 // indirect
golang.org/x/net v0.28.0 // indirect
golang.org/x/sys v0.23.0 // indirect
golang.org/x/text v0.17.0 // indirect
golang.org/x/time v0.5.0 // indirect
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
google.golang.org/protobuf v1.34.2 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
github.com/volatiletech/strmangle v0.0.8 // indirect
go.uber.org/mock v0.5.0 // indirect
go.uber.org/multierr v1.11.0 // indirect
golang.org/x/arch v0.14.0 // indirect
golang.org/x/crypto v0.33.0 // indirect
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac // indirect
golang.org/x/mod v0.23.0 // indirect
golang.org/x/net v0.35.0 // indirect
golang.org/x/sys v0.30.0 // indirect
golang.org/x/text v0.22.0 // indirect
golang.org/x/time v0.8.0 // indirect
golang.org/x/tools v0.30.0 // indirect
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum (343 changes)

@@ -1,3 +1,4 @@
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA=
filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4=
github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
@@ -6,27 +7,25 @@ github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuy
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/armon/go-metrics v0.4.1 h1:hR91U9KYmb6bLBYLQjyM+3j+rcd/UhE+G78SFnF8gJA=
github.com/armon/go-metrics v0.4.1/go.mod h1:E6amYzXo6aW1tqzoZGT755KkbgrJsSdpwZ+3JqfkOG4=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/aws/aws-sdk-go v1.55.5 h1:KKUZBfBoyqy5d3swXyiC7Q76ic40rYcbqH7qjh59kzU=
github.com/aws/aws-sdk-go v1.55.5/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/aws/aws-sdk-go v1.55.6 h1:cSg4pvZ3m8dgYcgqB97MrcdjUmZ1BeMYKUxMMB89IPk=
github.com/aws/aws-sdk-go v1.55.6/go.mod h1:eRwEWoyTWFMVYVQzKMNHWP5/RV4xIUGMQfXQHfHkpNU=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bluele/gcache v0.0.2 h1:WcbfdXICg7G/DGBh1PFfcirkWOQV+v077yF1pSy3DGw=
github.com/bluele/gcache v0.0.2/go.mod h1:m15KV+ECjptwSPxKhOhQoAFQVtUFjTVkc3H8o0t/fp0=
github.com/brk0v/directio v0.0.0-20190225130936-69406e757cf7 h1:7gNKWnX6OF+ERiXVw4I9RsHhZ52aumXdFE07nEx5v20=
github.com/brk0v/directio v0.0.0-20190225130936-69406e757cf7/go.mod h1:M/KA3XJG5PJaApPiv4gWNsgcSJquOQTqumZNLyYE0KM=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f h1:bAs4lUbRJpnnkd9VhRV3jjAVU7DJVjMaK+IsvSeZvFo=
github.com/brk0v/directio v0.0.0-20241105172640-ae9d82eb8fee h1:gwVgfLo8dvQMauElELV1b1kwMuUh2ETzX6weEbdAKgA=
github.com/brk0v/directio v0.0.0-20241105172640-ae9d82eb8fee/go.mod h1:M/KA3XJG5PJaApPiv4gWNsgcSJquOQTqumZNLyYE0KM=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d h1:yJzD/yFppdVCf6ApMkVy8cUxV0XrxdP9rVf6D87/Mng=
github.com/btcsuite/btclog v1.0.0 h1:sEkpKJMmfGiyZjADwEIgB1NSwMyfdD1FB8v6+w1T0Ns=
github.com/btcsuite/btclog v1.0.0/go.mod h1:w7xnGOhwT3lmrS4H3b/D1XAXxvh+tbhUm8xeHN2y3TQ=
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts=
github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd h1:R/opQEbFEy9JGkIguV40SvRY1uliPX8ifOvi6ICsFCw=
github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg=
github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY=
@@ -34,25 +33,22 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792 h1:R8vQdOQdZ9Y3SkEwmHoWBmX1DNXhXZqlTpq6s4tyJGc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bytedance/sonic v1.11.6 h1:oUp34TzMlL+OY1OUWxHqsdkgC/Zfc85zGqw9siXjrc0=
github.com/bytedance/sonic v1.11.6/go.mod h1:LysEHSvpvDySVdC2f87zGWf6CIKJcAvqab1ZaiQtds4=
github.com/bytedance/sonic/loader v0.1.1 h1:c+e5Pt1k/cy5wMveRDyk2X4B9hF4g7an8N3zCYjJFNM=
github.com/bytedance/sonic v1.12.8 h1:4xYRVRlXIgvSZ4e8iVTlMF5szgpXd4AfvuWgA8I8lgs=
github.com/bytedance/sonic v1.12.8/go.mod h1:uVvFidNmlt9+wa31S1urfwwthTWteBgG0hWuoKAXTx8=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.3 h1:yctD0Q3v2NOGfSWPLPvG2ggA2kV6TS6s4wioyEqssH0=
github.com/bytedance/sonic/loader v0.2.3/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500 h1:6lhrsTEnloDPXyeZBvSYvQf8u86jbKehZPVDDlkgDl4=
github.com/c2h5oh/datasize v0.0.0-20231215233829-aa82cc1e6500/go.mod h1:S/7n9copUssQ56c7aAgHqftWO4LTf4xY6CGWt8Bc+3M=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -60,79 +56,90 @@ github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/ekyoung/gin-nice-recovery v0.0.0-20160510022553-1654dca486db h1:oZ4U9IqO8NS+61OmGTBi8vopzqTRxwQeogyBHdrhjbc=
github.com/ekyoung/gin-nice-recovery v0.0.0-20160510022553-1654dca486db/go.mod h1:Pk7/9x6tyChFTkahDvLBQMlvdsWvfC+yU8HTT5VD314=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/friendsofgo/errors v0.9.2 h1:X6NYxef4efCBdwI7BgS820zFaN7Cphrmb+Pljdzjtgk=
github.com/friendsofgo/errors v0.9.2/go.mod h1:yCvFW5AkDIL9qn7suHVLiI/gH228n7PC4Pn44IGoTOI=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0=
github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M=
github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E=
github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk=
github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-ini/ini v1.67.0 h1:z6ZrTEZqSWOTyH2FlglNbNgARyHG8oLW9gMELqKr06A=
github.com/go-ini/ini v1.67.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.20.0 h1:K9ISHbSaI0lyB2eWMPJo+kOS/FBExVwjEviJTixqxL8=
github.com/go-playground/validator/v10 v10.20.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM=
github.com/go-playground/validator/v10 v10.24.0 h1:KHQckvo8G6hlWnrPX4NJJ+aBfWNAE/HH+qdL2cBpCmg=
github.com/go-playground/validator/v10 v10.24.0/go.mod h1:GGzBIJMuE98Ic/kJsBXbz1x/7cByt++cQ+YOuDM5wus=
github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y=
github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-test/deep v1.0.4 h1:u2CU3YKy9I2pmu9pX0eq50wCgjfGIt539SqR7FbHiho=
github.com/go-test/deep v1.0.4/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/go-viper/mapstructure/v2 v2.2.1 h1:ZAaOCxANMuZx5RCeg0mBdEZk7DZasvvZIxtHqx8aGss=
github.com/go-viper/mapstructure/v2 v2.2.1/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0=
github.com/gofrs/uuid v4.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gofrs/uuid v4.4.0+incompatible h1:3qXRTX8/NbyulANqlc0lchS1gqAVxRgsuW1YrTJupqA=
github.com/gofrs/uuid v4.4.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU=
github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gops v0.3.28 h1:2Xr57tqKAmQYRAfG12E+yLcoa2Y42UJo2lOrUFL9ark=
github.com/google/gops v0.3.28/go.mod h1:6f6+Nl8LcHrzJwi8+p0ii+vmBFSlB4f8cOOkTJ7sk4c=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd h1:1FjCyPC+syAzJ5/2S8fqdZK1R22vvA0J7JZKcuOIQ7Y=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20250208200701-d0013a598941 h1:43XjGa6toxLpeksjcxs1jIoIyr+vUfOqY2c6HB4bpoc=
github.com/google/pprof v0.0.0-20250208200701-d0013a598941/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144=
github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY=
github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ=
github.com/gorilla/rpc v1.2.0 h1:WvvdC2lNeT1SP32zrIce5l0ECBfbAlmrmSBsuc57wfk=
github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY=
github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
@ -140,32 +147,26 @@ github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtng
github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc=
github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60=
github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4=
github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM=
github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk=
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA=
github.com/hashicorp/go-metrics v0.5.4 h1:8mmPiIJkTPPEbAiV97IxdAGNdRdaWwVap1BU6elejKY=
github.com/hashicorp/go-metrics v0.5.4/go.mod h1:CG5yz4NZ/AI/aQt9Ucm/vdBnbh7fvmv4lxZ350i+QQI=
github.com/hashicorp/go-msgpack/v2 v2.1.2 h1:4Ee8FTp834e+ewB71RDrQ0VKpyFdrKOjvYtnQ/ltVj0=
github.com/hashicorp/go-msgpack/v2 v2.1.2/go.mod h1:upybraOAblm4S7rx0+jeNy+CWWhzywQsSRV5033mMu4=
github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo=
github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM=
github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs=
github.com/hashicorp/go-sockaddr v1.0.0 h1:GeH6tui99pF4NJgfnhp+L6+FfobzVW3Ah46sLo0ICXs=
github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU=
github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4=
github.com/hashicorp/go-sockaddr v1.0.5 h1:dvk7TIXCZpmfOlM+9mlcrWmWjw/wlKT+VDq2wMvfPJU=
github.com/hashicorp/go-sockaddr v1.0.5/go.mod h1:uoUUmtwU7n9Dv3O4SNLeFvg0SxQ3lyjsj6+CCykpaxI=
github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE=
github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc=
github.com/hashicorp/memberlist v0.5.0 h1:EtYPN8DpAURiapus508I4n9CzHs2W+8NZGbmmR/prTM=
github.com/hashicorp/memberlist v0.5.0/go.mod h1:yvyXLpo0QaGE59Y7hDTsTzDD25JYBZ4mHgHUZ8lrOI0=
github.com/hashicorp/serf v0.10.1 h1:Z1H2J60yRKvfDYAOZLd2MU0ND4AH/WDz7xYHDWQsIPY=
github.com/hashicorp/serf v0.10.1/go.mod h1:yL2t6BqATOLGc5HF7qbFkTfXoPIY0WZdWHfEvMqbG+4=
github.com/hashicorp/golang-lru v1.0.2 h1:dV3g9Z/unq5DpblPpw+Oqcv4dU/1omnb4Ok8iPY6p1c=
github.com/hashicorp/golang-lru v1.0.2/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/memberlist v0.5.2 h1:rJoNPWZ0juJBgqn48gjy59K5H4rNgvUoM1kUD7bXiuI=
github.com/hashicorp/memberlist v0.5.2/go.mod h1:Ri9p/tRShbjYnpNf4FFPXG7wxEGY4Nrcn6E7jrVa//4=
github.com/hashicorp/serf v0.10.2 h1:m5IORhuNSjaxeljg5DeQVDlQyVkhRIjJDimbkCa8aAc=
github.com/hashicorp/serf v0.10.2/go.mod h1:T1CmSGfSeGfnfNy/w0odXQUR1rfECGd2Qdsp84DjOiY=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20210905161508-09a460cdf81d/go.mod h1:aYm2/VgdVmcIU8iMfdMvDMsRAQjcfZSKFby6HOFvi/w=
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf h1:WfD7VjIE6z8dIvMsI4/s+1qr5EL+zoIGev1BQj1eoJ8=
github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf/go.mod h1:hyb9oH7vZsitZCiBt0ZvifOrB+qc8PS5IiilCIb87rg=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
@ -175,28 +176,29 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/johntdyer/slack-go v0.0.0-20230314151037-c5bf334f9b6e h1:5tRmeUw/tXT/DvaoloWTWwlyrEZrKA7pnrz/X+g9s34=
github.com/johntdyer/slack-go v0.0.0-20230314151037-c5bf334f9b6e/go.mod h1:u0Jo4f2dNlTJeeOywkM6bLwxq6gC3pZ9rEFHn3AhTdk=
github.com/johntdyer/slackrus v0.0.0-20230315191314-80bc92dee4fc h1:enUIjGI+ljPLV2X3Mu3noR0P3m2NaIFGRsp96J8RBio=
github.com/johntdyer/slackrus v0.0.0-20230315191314-80bc92dee4fc/go.mod h1:EM3NFHkhmCX05s6UvxWSJ8h/3mluH4tF6bYr9FXF1Cg=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/karrick/godirwalk v1.17.0 h1:b4kY7nqDdioR/6qnbHQyDvmA17u5G1cZ6J+CZXwSWoI=
github.com/karrick/godirwalk v1.17.0/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA=
github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/cpuid/v2 v2.2.9 h1:66ze0taIn2H33fBvCkXuv9BmCwDfafmiIVpKV9kKGuY=
github.com/klauspost/cpuid/v2 v2.2.9/go.mod h1:rqkxqrZ1EhYM9G+hXH7YdowN5R5RGN6NK4QwQ3WMXF8=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
@ -219,23 +221,11 @@ github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ=
github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI=
github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5 h1:mG83tLXWSRdcXMWfkoumVwhcCbf3jHF9QKv/m37BkM0=
github.com/lyoshenka/bencode v0.0.0-20180323155644-b7abd7672df5/go.mod h1:H0aPCWffGOaDcjkw1iB7W9DVLp6GXmfcJY/7YZCWPA4=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso=
github.com/miekg/dns v1.1.41 h1:WMszZWJG0XmzbK9FEmzH2TVcqYzFesusSIB41b8KHxY=
github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI=
github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/miekg/dns v1.1.56 h1:5imZaSeoRNvpM9SzWNhEcP9QliKiz20/dA2QabIGVnE=
github.com/miekg/dns v1.1.56/go.mod h1:cRm6Oo2C8TY9ZS/TqsSrseAcncm74lfK5G+ikN2SWWY=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -248,18 +238,18 @@ github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjY
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q=
github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k=
github.com/onsi/ginkgo/v2 v2.22.2 h1:/3X8Panh8/WwhU/3Ssa6rCKqPLuAkVY2I0RoyDLySlU=
github.com/onsi/ginkgo/v2 v2.22.2/go.mod h1:oeMosUL+8LtarXBHu/c0bx2D/K9zyQ6uX3cTyztHwsk=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8=
github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY=
github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5 h1:Ii+DKncOVM8Cu1Hc+ETb5K+23HdAMvESYE3ZJ5b5cMI=
github.com/phayes/freeport v0.0.0-20220201140144-74d24b5ae9f5/go.mod h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -269,11 +259,11 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.20.5 h1:cxppBPuYhUnsO6yo/aoRol4L7q7UFfdm+bR9r+8l63Y=
github.com/prometheus/client_golang v1.20.5/go.mod h1:PIEt8X02hGcP8JWbeHyeZ53Y/jReSnHgO035n//V5WE=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@ -283,51 +273,53 @@ github.com/prometheus/client_model v0.6.1 h1:ZKSh/rekM+n3CeS952MLRAdFwIKqeY8b62p
github.com/prometheus/client_model v0.6.1/go.mod h1:OrxVMOVHjw3lKMa8+x6HeMGkHMQyHDk9E3jmP2AmGiY=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.55.0 h1:KEi6DK7lXW/m7Ig5i47x0vRzuBsHuvJdi5ee6Y3G1dc=
github.com/prometheus/common v0.55.0/go.mod h1:2SECS4xJG1kd8XF9IcM1gMX6510RAEL65zxzNImwdc8=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.62.0 h1:xasJaQlnWAeyHdUBeGjXmutelfJHWMRr+Fg4QszZ2Io=
github.com/prometheus/common v0.62.0/go.mod h1:vyBcEuLSvWos9B1+CyL7JZ2up+uFzXhkqml0W5zIY1I=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
github.com/quic-go/quic-go v0.48.2 h1:wsKXZPeGWpMpCGSWqOcqpW2wZYic/8T3aqiOID0/KWE=
github.com/quic-go/quic-go v0.48.2/go.mod h1:yBgs3rWBOADpga7F+jJsb6Ybg1LSYiQvwWlLX+/6HMs=
github.com/quic-go/quic-go v0.49.0 h1:w5iJHXwHxs1QxyBv1EHKuC50GX5to8mJAxvtnttJp94=
github.com/quic-go/quic-go v0.49.0/go.mod h1:s2wDnmCdooUQBmQfpUSTCYBl1/D4FcqbULMMkASvR6s=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo=
github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 h1:nn5Wsu0esKSJiIVhscUtVbo7ada43DJhG55ua/hjS5I=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/sebdah/goldie v1.0.0 h1:9GNhIat69MSlz/ndaBg48vl9dF5fI+NBB6kfOxgfkMc=
github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4=
github.com/sergi/go-diff v1.3.1 h1:xkr+Oxo4BOQKmkn/B9eMK0g5Kg/983T9DqqPHwYqD+8=
github.com/sergi/go-diff v1.3.1/go.mod h1:aMJSSKb2lpPvRNec0+w3fl7LP9IOFzdc9Pa4NFbPK1I=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/slack-go/slack v0.12.1 h1:X97b9g2hnITDtNsNe5GkGx6O2/Sz/uC20ejRZN6QxOw=
github.com/slack-go/slack v0.12.1/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
github.com/slack-go/slack v0.16.0 h1:khp/WCFv+Hb/B/AJaAwvcxKun0hM6grN0bUZ8xG60P8=
github.com/slack-go/slack v0.16.0/go.mod h1:hlGi5oXA+Gt+yWTPP0plCdRKmjsDxecdHxYQdlMQKOw=
github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0=
github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.18.2 h1:LUXCnvUvSM6FXAsj6nnfc8Q2tp1dIgUfY9Kc8GsSOiQ=
github.com/spf13/viper v1.18.2/go.mod h1:EKmWIqdnk5lOcmR72yw6hS+8OPYcwD0jteitLMVB+yk=
github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs=
github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4=
github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y=
github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.20.1 h1:ZMi+z/lvLyPSCoNtFCpqjy0S4kPbirhpTMwl8BkW9X4=
github.com/spf13/viper v1.20.1/go.mod h1:P9Mdzt1zoHIG8m2eZQinpiBjo6kCmZSKBClNNqjJvu4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
@ -336,14 +328,13 @@ github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
@ -358,111 +349,107 @@ github.com/volatiletech/null/v8 v8.1.2/go.mod h1:98DbwNoKEpRrYtGjWFctievIfm4n4Mx
github.com/volatiletech/randomize v0.0.1 h1:eE5yajattWqTB2/eN8df4dw+8jwAzBtbdo5sbWC4nMk=
github.com/volatiletech/randomize v0.0.1/go.mod h1:GN3U0QYqfZ9FOJ67bzax1cqZ5q2xuj2mXrXBjWaRTlY=
github.com/volatiletech/strmangle v0.0.1/go.mod h1:F6RA6IkB5vq0yTG4GQ0UsbbRcl3ni9P76i+JrTBKFFg=
github.com/volatiletech/strmangle v0.0.6 h1:AdOYE3B2ygRDq4rXDij/MMwq6KVK/pWAYxpC7CLrkKQ=
github.com/volatiletech/strmangle v0.0.6/go.mod h1:ycDvbDkjDvhC0NUU8w3fWwl5JEMTV56vTKXzR3GeR+0=
github.com/volatiletech/strmangle v0.0.8 h1:UZkTDFIjZcL1Lk4BXhGsxcyXxNcWuM5ZwdzZc0sJcWg=
github.com/volatiletech/strmangle v0.0.8/go.mod h1:ycDvbDkjDvhC0NUU8w3fWwl5JEMTV56vTKXzR3GeR+0=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.uber.org/atomic v1.11.0 h1:ZvwS0R+56ePWxUNi+Atn9dWONBPp/AUETXlHW0DxSjE=
go.uber.org/atomic v1.11.0/go.mod h1:LUxbIzbOniOlMKjJjyPfpl4v+PKK2cNJn91OQbhoJI0=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc=
go.uber.org/multierr v1.9.0 h1:7fIwc/ZtS0q++VgcfqFDxSBZVv/Xo49/SYnDFupUwlI=
go.uber.org/multierr v1.9.0/go.mod h1:X2jQV1h+kxSjClGpnseKVIxpmcjrj7MNnI0bnlfKTVQ=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.8.0 h1:3wRIsP3pM4yUptoR96otTUOXI367OS0+c9eeRi9doIc=
golang.org/x/arch v0.8.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
go.uber.org/mock v0.5.0 h1:KAMbZvZPyBPWgD14IrIQ38QCyjwpvVVV6K/bHl1IwQU=
go.uber.org/mock v0.5.0/go.mod h1:ge71pBPLYDk7QIi1LupWxdAykm7KIEFchiOqd6z7qMM=
go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/arch v0.14.0 h1:z9JUEZWr8x4rR0OU6c4/4t6E6jOZ8/QBS2bBYBm4tx4=
golang.org/x/arch v0.14.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJkTFJCVUX+S/ZT6wYzM=
golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc=
golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac h1:l5+whBCLH3iH2ZNHYLbAe58bo7yrN4mVcnkHDYz5vvs=
golang.org/x/exp v0.0.0-20250210185358-939b2ce775ac/go.mod h1:hH+7mtFmImwwcMvScyxUhjuVHR3HGaDPMn9rMSUUbxo=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM=
golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8=
golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE=
golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg=
golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY=
golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 h1:H2TDz8ibqkAF6YGhCdN3jS9O0/s90v0rJh3X/OLHEUk=
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg=
google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY=
golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79 h1:FpCr9V8wuOei4BAen+93HtVJ+XSi+KPbaPKm0Vj5R64=
gopkg.in/nullbio/null.v6 v6.0.0-20161116030900-40264a2e6b79/go.mod h1:gWkaRU7CoXpezCBWfWjm3999QqS+1pYPXGbqQCTMzo8=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
@ -471,6 +458,7 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@ -479,4 +467,3 @@ gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=

View file

@@ -69,6 +69,7 @@ const (
DirectionDownload = "download" // from reflector
LabelCacheType = "cache_type"
LabelOrigin = "origin"
LabelComponent = "component"
LabelSource = "source"
@@ -135,18 +136,12 @@ var (
Name: "hit_total",
Help: "Total number of blobs retrieved from the cache storage",
}, []string{LabelCacheType, LabelComponent})
ThisHitCount = promauto.NewCounter(prometheus.CounterOpts{
ItttHitCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: subsystemITTT,
Name: "this_hit_total",
Help: "Total number of blobs retrieved from the this storage",
})
ThatHitCount = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Subsystem: subsystemITTT,
Name: "that_hit_total",
Help: "Total number of blobs retrieved from the that storage",
})
Name: "hits_total",
Help: "Total number of blobs retrieved from the this/that storage",
}, []string{LabelOrigin})
CacheMissCount = promauto.NewCounterVec(prometheus.CounterOpts{
Namespace: ns,
Subsystem: subsystemCache,
@@ -229,6 +224,11 @@ var (
Name: "s3_out_bytes",
Help: "Total number of outgoing bytes (to S3)",
})
MtrInBytesUpstream = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "upstream_in_bytes",
Help: "Total number of incoming bytes (from Upstream)",
})
MtrInBytesS3 = promauto.NewCounter(prometheus.CounterOpts{
Namespace: ns,
Name: "s3_in_bytes",
@@ -257,6 +257,11 @@ func CacheLabels(name, component string) prometheus.Labels {
LabelComponent: component,
}
}
func ItttLabels(orig string) prometheus.Labels {
return prometheus.Labels{
LabelOrigin: orig,
}
}
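The two fixed counters above collapse into a single labelled vector, so hits on the "this" and "that" sides of an ITTT store go through one metric. A rough illustration of how a caller might record hits with the new `ItttHitCount`/`ItttLabels` pair; the surrounding program is not from this PR, only the metrics API is:

```go
package main

import (
	"fmt"

	"github.com/lbryio/reflector.go/internal/metrics"

	"github.com/prometheus/client_golang/prometheus/testutil"
)

func main() {
	// Record one hit on each side of the ITTT store.
	metrics.ItttHitCount.With(metrics.ItttLabels("this")).Inc()
	metrics.ItttHitCount.With(metrics.ItttLabels("that")).Inc()

	// Read a counter back; testutil is only used here for a quick sanity check.
	hits := testutil.ToFloat64(metrics.ItttHitCount.With(metrics.ItttLabels("this")))
	fmt.Println("this hits:", hits) // 1
}
```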
func TrackError(direction string, e error) (shouldLog bool) { // shouldLog is a hack, but whatever
if e == nil {

341472
livelock.log Normal file

File diff suppressed because it is too large

View file

@@ -2,6 +2,7 @@ package prism
import (
"context"
"fmt"
"strconv"
"sync"
@@ -78,8 +79,8 @@ func New(conf *Config) *Prism {
db: conf.DB,
dht: d,
cluster: c,
peer: peer.NewServer(conf.Blobs),
reflector: reflector.NewServer(conf.Blobs, conf.Blobs),
peer: peer.NewServer(conf.Blobs, fmt.Sprintf(":%d", conf.PeerPort)),
reflector: reflector.NewIngestionServer(conf.Blobs),
grp: stop.New(),
}
@@ -107,7 +108,7 @@ func (p *Prism) Start() error {
return errors.Err("blobs required in conf")
}
err = p.peer.Start(":" + strconv.Itoa(p.conf.PeerPort))
err = p.peer.Start()
if err != nil {
return err
}
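In short, the peer server's listen address now travels through the constructor and `Start()` takes no arguments. A minimal standalone sketch based on that signature change; the in-memory store and the `Shutdown` call are assumptions for illustration, not taken from this diff:

```go
package main

import (
	"os"
	"os/signal"
	"syscall"

	"github.com/lbryio/reflector.go/server/peer"
	"github.com/lbryio/reflector.go/store"

	log "github.com/sirupsen/logrus"
)

func main() {
	// In-memory store just for illustration; production wires a real store.
	blobs := store.NewMemStore(store.MemParams{Name: "example"})

	// The listen address is now passed to the constructor, and Start()
	// takes no arguments (see the prism changes above).
	srv := peer.NewServer(blobs, ":5567")
	if err := srv.Start(); err != nil {
		log.Fatal(err)
	}
	defer srv.Shutdown() // assumed unchanged from the previous API

	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt, syscall.SIGTERM)
	<-interrupt
}
```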

271
readme.md
View file

@@ -1,124 +1,189 @@
# Reflector
Reflector is a central piece of software that provides LBRY with the following features:
- Blobs reflection: when something is published, we capture the data and store it on our servers for quicker retrieval
- Blobs distribution: when a piece of content is requested and the LBRY network doesn't have it, reflector will retrieve it from its storage and distribute it
- Blobs caching: reflectors can be chained together in multiple regions or servers to form a chain of cached content. We call those "blobcaches". They are layered so that content distribution is favorable in all the regions we deploy it to
Production-ready blob reflection, distribution, and caching for Odysee.
There are a few other features embedded in reflector.go, including publishing streams from Go, downloading or uploading blobs, resolving content, and other unfinished tools.
This repository provides the components used in production:
- Reflector ingestion server (command name: `reflector`)
- Blob cache/edge server (`blobcache`)
- Uploader to object storage (`upload`)
This code includes Go implementations of the LBRY peer protocol, the reflector protocol, and the DHT.
Other commands exist in the tree for historical/legacy reasons and are not supported.
## Installation
## How it works (at a glance)
- Ingestion (reflector): accepts uploaded blobs, persists them to object storage (e.g., S3/Wasabi) and tracks state in MySQL.
- Distribution: serves blobs over HTTP/HTTP3/Peer. Blobcaches can be deployed in front of the origin to reduce latency and egress.
- Caching (blobcache): layered disk caches backed by HTTP(S) origins (e.g., S3 endpoints), with optional local DB metadata for capacity/eviction.
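The same layering can be composed programmatically with the `store` package. A minimal sketch of a disk cache in front of a remote peer origin; the names, addresses, and mount point are made up, and production deployments declare the equivalent topology in YAML as shown below:

```go
package main

import (
	"time"

	"github.com/lbryio/reflector.go/store"
)

func main() {
	// Layered cache: check the local disk first, fall back to the remote
	// origin, and populate the disk cache on the way back.
	cache := store.NewCachingStore(store.CachingParams{
		Name: "edge_cache",
		Cache: store.NewDiskStore(store.DiskParams{
			Name:         "local_cache",
			MountPoint:   "/mnt/blobcache/cache",
			ShardingSize: 2,
		}),
		Origin: store.NewPeerStore(store.PeerParams{
			Name:    "origin",
			Address: "reflector.example.com:5567", // hypothetical origin
			Timeout: 30 * time.Second,
		}),
	})
	defer cache.Shutdown()

	_ = cache // hand this store to one of the servers (peer/http/http3)
}
```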
- Install mysql 8 (5.7 might work too)
- Add a reflector user and database with password `reflector`, with localhost access only
- Create the tables as described [here](https://github.com/lbryio/reflector.go/blob/master/db/db.go#L735) (the link may not track code changes, so just look for the schema in that file)
All services are started by the `prism` binary and are configured via YAML files loaded from a configuration directory.
#### We do not support running reflector.go as a blob receiver; however, if you want to run it as a private blobcache, you may compile it yourself and run it as follows:
## Supported commands
The following are the only supported commands for production use:
- Reflector ingestion: `prism reflector`
- Flags: `--receiver-port` (default 5566), `--metrics-port` (default 2112), `--disable-blocklist`
- Loads `reflector.yaml` from the config directory.
- Blob cache: `prism blobcache`
- Flags: `--metrics-port` (default 2112), `--disable-blocklist`
- Loads `blobcache.yaml` from the config directory.
- Uploader: `prism upload PATH`
- Flags: `--workers`, `--skipExistsCheck`, `--deleteBlobsAfterUpload`
- Loads `upload.yaml` from the config directory.
Global flag for all commands:
- `--conf-dir` (default `./`): directory containing YAML config files.
## Configuration
Configuration is per-command. The loader reads `<command>.yaml` from `--conf-dir`.
Common sections:
- `servers`: enables HTTP/HTTP3/Peer servers. Keys: `http`, `http3`, `peer`. Each accepts:
- `port` (int)
- `max_concurrent_requests` (int, http/http3)
- `edge_token` (string, http)
- `address` (string, optional; bind address, omit for all interfaces)
- `store`: defines the storage topology using composable stores. Frequently used:
- `proxied-s3`: production pattern with a `writer` (DB-backed -> S3/multiwriter) and a `reader` (caching -> disk + HTTP origins).
- `caching`: layered cache with a `cache` (often `db_backed` -> `disk`) and an `origin` chain (`http`, `http3`, or `ittt` fan-in).
- `s3`, `disk`, `multiwriter`, `db_backed`, `http`, `http3`, `peer`, `upstream` are also available building blocks.
### Minimal examples
Reflector conf-dir contains `reflector.yaml`:
```yaml
servers:
  http:
    port: 5569
    max_concurrent_requests: 200
  http3:
    port: 5568
    max_concurrent_requests: 200
  peer:
    port: 5567
store:
  proxied-s3:
    name: s3_read_proxy
    writer:
      db_backed:
        user: reflector
        password: reflector
        database: reflector
        host: localhost
        port: 3306
        access_tracking: 1
        soft_deletes: true
        store:
          s3:
            name: primary
            aws_id: YOUR_KEY
            aws_secret: YOUR_SECRET
            region: us-east-1
            bucket: blobs-bucket
            endpoint: https://s3.yourendpoint.tv
    reader:
      caching:
        cache:
          disk:
            name: local_cache
            mount_point: /mnt/reflector/cache
            sharding_size: 2
        origin:
          http:
            endpoint: https://s3.yourendpoint.tv/blobs-bucket/
            sharding_size: 4
```
Blobcache conf-dir contains `blobcache.yaml`:
```yaml
servers:
  http:
    port: 5569
    max_concurrent_requests: 200
  http3:
    port: 5568
    max_concurrent_requests: 200
  peer:
    port: 5567
store:
  caching:
    cache:
      db_backed:
        user: reflector
        password: reflector
        database: reflector
        host: localhost
        port: 3306
        has_cap: true
        max_size: 500GB
        store:
          disk:
            name: blobcache
            mount_point: /mnt/blobcache/cache
            sharding_size: 2
    origin:
      http:
        endpoint: https://s3.yourendpoint.tv/blobs-bucket/
        sharding_size: 4
```
Uploader conf-dir contains `upload.yaml` (points to the same writer/backend as reflector):
```yaml
database:
  user: reflector
  password: reflector
  database: reflector
  host: localhost
  port: 3306
store:
  proxied-s3:
    writer:
      db_backed:
        user: reflector
        password: reflector
        database: reflector
        host: localhost
        port: 3306
        store:
          s3:
            aws_id: YOUR_KEY
            aws_secret: YOUR_SECRET
            region: us-east-1
            bucket: blobs-bucket
            endpoint: https://s3.yourendpoint.tv
```
## Quick start
1) Build
- Requires Go 1.23+
- `make` (binaries in `dist/<platform>/prism-bin`)
2) Run a local blobcache
```bash
./prism-bin reflector \
--conf="none" \
--disable-uploads=true \
--use-db=false \
--upstream-reflector="reflector.lbry.com" \
--upstream-protocol="http" \
--request-queue-size=200 \
--disk-cache="2GB:/path/to/your/storage/:localdb" \
./dist/linux_amd64/prism-bin --conf-dir=./ blobcache
```
Place your `blobcache.yaml` in the `--conf-dir` directory.
Create a systemd unit if you want to run it automatically on startup or as a service.
## Usage
Usage as reflector/blobcache:
3) Run reflector ingestion
```bash
Run reflector server
Usage:
prism reflector [flags]
Flags:
--disable-blocklist Disable blocklist watching/updating
--disable-uploads Disable uploads to this reflector server
--disk-cache string Where to cache blobs on the file system. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfuda/lru) (default "100GB:/tmp/downloaded_blobs:localdb")
-h, --help help for reflector
--http-peer-port int The port reflector will distribute content from over HTTP protocol (default 5569)
--http3-peer-port int The port reflector will distribute content from over HTTP3 protocol (default 5568)
--mem-cache int enable in-memory cache with a max size of this many blobs
--metrics-port int The port reflector will use for prometheus metrics (default 2112)
--optional-disk-cache string Optional secondary file system cache for blobs. format is 'sizeGB:CACHE_PATH:cachemanager' (cachemanagers: localdb/lfuda/lru) (this would get hit before the one specified in disk-cache)
--origin-endpoint string HTTP edge endpoint for standard HTTP retrieval
--origin-endpoint-fallback string HTTP edge endpoint for standard HTTP retrieval if first origin fails
--receiver-port int The port reflector will receive content from (default 5566)
--request-queue-size int How many concurrent requests from downstream should be handled at once (the rest will wait) (default 200)
--tcp-peer-port int The port reflector will distribute content from for the TCP (LBRY) protocol (default 5567)
--upstream-protocol string protocol used to fetch blobs from another upstream reflector server (tcp/http3/http) (default "http")
--upstream-reflector string host:port of a reflector server where blobs are fetched from
--use-db Whether to connect to the reflector db or not (default true)
Global Flags:
--conf string Path to config. Use 'none' to disable (default "config.json")
-v, --verbose strings Verbose logging for specific components
./dist/linux_amd64/prism-bin --conf-dir=./ reflector --receiver-port=5566 --metrics-port=2112
```
Other uses:
4) Upload blobs
```bash
Prism is a single entry point application with multiple sub modules which can be leveraged individually or together
Usage:
prism [command]
Available Commands:
check-integrity check blobs integrity for a given path
cluster Start(join) to or Start a new cluster
decode Decode a claim value
dht Run dht node
getstream Get a stream from a reflector server
help Help about any command
peer Run peer server
populate-db populate local database with blobs from a disk storage
publish Publish a file
reflector Run reflector server
resolve Resolve a URL
send Send a file to a reflector
sendblob Send a random blob to a reflector server
start Runs full prism application with cluster, dht, peer server, and reflector server.
test Test things
upload Upload blobs to S3
version Print the version
Flags:
--conf string Path to config. Use 'none' to disable (default "config.json")
-h, --help help for prism
-v, --verbose strings Verbose logging for specific components
```
## Running from Source
This project requires [Go v1.20](https://golang.org/doc/install).
On Ubuntu you can install it with `sudo snap install go --classic`
```
git clone git@github.com:lbryio/reflector.go.git
cd reflector.go
make
./dist/linux_amd64/prism-bin
./dist/linux_amd64/prism-bin --conf-dir=./ upload /path/to/blobs \
--workers=4 --skipExistsCheck
```
## Contributing
coming soon
## License
This project is MIT licensed.
## Notes
- Only reflector, blobcache, and upload are supported. All other commands are legacy and may be removed in the future.
- Metrics are exposed on the configured `--metrics-port` at `/metrics` (Prometheus format); a quick check is sketched below.
- MySQL is required when using DB-backed stores (e.g., ingestion writer, capacity-aware caches).
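A quick way to verify a running instance is to fetch the metrics endpoint. A minimal sketch, assuming the default metrics port and a locally running service:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"time"
)

func main() {
	client := &http.Client{Timeout: 5 * time.Second}
	resp, err := client.Get("http://localhost:2112/metrics") // default --metrics-port
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", body) // Prometheus text exposition format
}
```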
## Security
If you discover a security issue, please email security@lbry.com. Our PGP key is available at https://lbry.com/faq/pgp-key.
We take security seriously. Please contact security@lbry.com regarding any security issues.
Our PGP key is [here](https://lbry.com/faq/pgp-key) if you need it.
## License
MIT License. See LICENSE.
## Contact
The primary contact for this project is [@Nikooo777](https://github.com/Nikooo777) (niko-at-lbry.com)
The primary contact for this project is [@Nikooo777](https://github.com/Nikooo777)

View file

@@ -22,10 +22,10 @@ const blocklistURL = "https://api.lbry.com/file/list_blocked"
func (s *Server) enableBlocklist(b store.Blocklister) {
walletServers := []string{
"spv25.lbry.com:50001",
"spv26.lbry.com:50001",
"spv19.lbry.com:50001",
"spv14.lbry.com:50001",
"a-hub1.odysee.com:50001",
"b-hub1.odysee.com:50001",
"c-hub1.odysee.com:50001",
"s-hub1.odysee.com:50001",
}
updateBlocklist(b, walletServers, s.grp.Ch())

View file

@@ -10,7 +10,7 @@ import (
"golang.org/x/sync/singleflight"
)
const protectedListURL = "https://api.odysee.com/file/list_protected"
const protectedListURL = "https://direct.api.odysee.com/file/list_protected"
type ProtectedContent struct {
SDHash string `json:"sd_hash"`
@@ -32,7 +32,10 @@ func GetProtectedContent() (interface{}, error) {
Data []ProtectedContent `json:"data"`
}
client := &http.Client{}
// Bound the request to avoid hanging the entire request path.
// Without a timeout, a slow or unreachable endpoint can block
// singleflight callers indefinitely and stall HTTP handlers.
client := &http.Client{Timeout: 5 * time.Second}
req, err := http.NewRequest(method, protectedListURL, nil)
if err != nil {

View file

@@ -39,18 +39,17 @@ type Server struct {
EnableBlocklist bool // if true, blocklist checking and blob deletion will be enabled
underlyingStore store.BlobStore
outerStore store.BlobStore
grp *stop.Group
//underlyingStore store.BlobStore
//outerStore store.BlobStore
store store.BlobStore
grp *stop.Group
}
// NewIngestionServer returns an initialized reflector ingestion server pointer.
func NewServer(underlying store.BlobStore, outer store.BlobStore) *Server {
func NewIngestionServer(store store.BlobStore) *Server {
return &Server{
Timeout: DefaultTimeout,
underlyingStore: underlying,
outerStore: outer,
grp: stop.New(),
Timeout: DefaultTimeout,
store: store,
grp: stop.New(),
}
}
@@ -89,7 +88,7 @@ func (s *Server) Start(address string) error {
}()
if s.EnableBlocklist {
if b, ok := s.underlyingStore.(store.Blocklister); ok {
if b, ok := s.store.(store.Blocklister); ok {
s.grp.Add(1)
metrics.RoutinesQueue.WithLabelValues("reflector", "enableblocklist").Inc()
go func() {
@@ -200,13 +199,13 @@ func (s *Server) receiveBlob(conn net.Conn) error {
}
var wantsBlob bool
if bl, ok := s.underlyingStore.(store.Blocklister); ok {
if bl, ok := s.store.(store.Blocklister); ok {
wantsBlob, err = bl.Wants(blobHash)
if err != nil {
return err
}
} else {
blobExists, err := s.underlyingStore.Has(blobHash)
blobExists, err := s.store.Has(blobHash)
if err != nil {
return err
}
@@ -216,14 +215,14 @@ func (s *Server) receiveBlob(conn net.Conn) error {
var neededBlobs []string
if isSdBlob && !wantsBlob {
if nbc, ok := s.underlyingStore.(neededBlobChecker); ok {
if nbc, ok := s.store.(store.NeededBlobChecker); ok {
neededBlobs, err = nbc.MissingBlobsForKnownStream(blobHash)
if err != nil {
return err
}
} else {
// if we can't check for blobs in a stream, we have to say that the sd blob is
// missing. if we say we have the sd blob, they wont try to send any content blobs
// missing. if we say we have the sd blob, they won't try to send any content blobs
wantsBlob = true
}
}
@ -259,9 +258,9 @@ func (s *Server) receiveBlob(conn net.Conn) error {
log.Debugln("Got blob " + blobHash[:8])
if isSdBlob {
err = s.outerStore.PutSD(blobHash, blob)
err = s.store.PutSD(blobHash, blob)
} else {
err = s.outerStore.Put(blobHash, blob)
err = s.store.Put(blobHash, blob)
}
if err != nil {
return err
@ -450,8 +449,3 @@ type blobTransferResponse struct {
type sdBlobTransferResponse struct {
ReceivedSdBlob bool `json:"received_sd_blob"`
}
// neededBlobChecker can check which blobs from a known stream are not uploaded yet
type neededBlobChecker interface {
MissingBlobsForKnownStream(string) ([]string, error)
}

View file

@ -23,7 +23,7 @@ func startServerOnRandomPort(t *testing.T) (*Server, int) {
t.Fatal(err)
}
srv := NewServer(store.NewMemStore(), store.NewMemStore())
srv := NewIngestionServer(store.NewMemStore(store.MemParams{Name: "test"}))
err = srv.Start("127.0.0.1:" + strconv.Itoa(port))
if err != nil {
t.Fatal(err)
@ -120,7 +120,7 @@ func TestServer_Timeout(t *testing.T) {
t.Fatal(err)
}
srv := NewServer(store.NewMemStore(), store.NewMemStore())
srv := NewIngestionServer(store.NewMemStore(store.MemParams{Name: "test"}))
srv.Timeout = testTimeout
err = srv.Start("127.0.0.1:" + strconv.Itoa(port))
if err != nil {
@ -182,8 +182,8 @@ func TestServer_PartialUpload(t *testing.T) {
missing[i] = bits.Rand().String()
}
st := store.BlobStore(&mockPartialStore{MemStore: store.NewMemStore(), missing: missing})
if _, ok := st.(neededBlobChecker); !ok {
st := store.BlobStore(&mockPartialStore{MemStore: store.NewMemStore(store.MemParams{Name: "test"}), missing: missing})
if _, ok := st.(store.NeededBlobChecker); !ok {
t.Fatal("mock does not implement the relevant interface")
}
err = st.Put(sdHash, randBlob(10))
@ -191,7 +191,7 @@ func TestServer_PartialUpload(t *testing.T) {
t.Fatal(err)
}
srv := NewServer(st, st)
srv := NewIngestionServer(st)
err = srv.Start("127.0.0.1:" + strconv.Itoa(port))
if err != nil {
t.Fatal(err)

View file

@ -30,7 +30,7 @@ type Summary struct {
type Uploader struct {
db *db.SQL
store *store.DBBackedStore // could just be store.BlobStore interface
store store.BlobStore
workers int
skipExistsCheck bool
deleteBlobsAfterUpload bool
@ -40,7 +40,7 @@ type Uploader struct {
count Summary
}
func NewUploader(db *db.SQL, store *store.DBBackedStore, workers int, skipExistsCheck, deleteBlobsAfterUpload bool) *Uploader {
func NewUploader(db *db.SQL, store store.BlobStore, workers int, skipExistsCheck, deleteBlobsAfterUpload bool) *Uploader {
return &Uploader{
db: db,
store: store,
@ -132,7 +132,7 @@ Upload:
return nil
}
// worker reads paths from a channel, uploads them, and optionally deletes them
// worker reads paths from a channel, uploads them, and optionally deletes them
func (u *Uploader) worker(pathChan chan string) {
for {
select {

View file

@ -2,6 +2,7 @@ package http
import (
"net/http"
"strconv"
"sync"
"time"
@ -79,6 +80,7 @@ func (s *Server) HandleGetBlob(c *gin.Context) {
metrics.HttpDownloadCount.Inc()
c.Header("Via", serialized)
c.Header("Content-Disposition", "filename="+hash)
c.Header("Content-Length", strconv.Itoa(len(blob)))
c.Data(http.StatusOK, "application/octet-stream", blob)
}

View file

@ -22,16 +22,18 @@ type Server struct {
concurrentRequests int
missesCache gcache.Cache
edgeToken string
address string
}
// NewServer returns an initialized Server pointer.
func NewServer(store store.BlobStore, requestQueueSize int, edgeToken string) *Server {
func NewServer(store store.BlobStore, requestQueueSize int, edgeToken string, address string) *Server {
return &Server{
store: store,
grp: stop.New(),
concurrentRequests: requestQueueSize,
missesCache: gcache.New(2000).Expiration(5 * time.Minute).ARC().Build(),
edgeToken: edgeToken,
address: address,
}
}
@ -43,7 +45,7 @@ func (s *Server) Shutdown() {
}
// Start starts the server listener to handle connections.
func (s *Server) Start(address string) error {
func (s *Server) Start() error {
gin.SetMode(gin.ReleaseMode)
router := gin.New()
router.Use(gin.Logger())
@ -52,7 +54,7 @@ func (s *Server) Start(address string) error {
router.GET("/blob", s.getBlob)
router.HEAD("/blob", s.hasBlob)
srv := &http.Server{
Addr: address,
Addr: s.address,
Handler: router,
}
go s.listenForShutdown(srv)
@ -62,7 +64,7 @@ func (s *Server) Start(address string) error {
s.grp.Add(1)
go func() {
defer s.grp.Done()
log.Println("HTTP server listening on " + address)
log.Println("HTTP server listening on " + s.address)
if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
log.Fatalf("listen: %s\n", err)
}

View file

@ -24,7 +24,7 @@ import (
type Client struct {
Timeout time.Duration
conn *http.Client
roundTripper *http3.RoundTripper
roundTripper *http3.Transport
ServerAddr string
}

View file

@ -11,6 +11,7 @@ import (
"math/big"
"net/http"
"strconv"
"strings"
"sync"
"time"
@ -32,14 +33,16 @@ type Server struct {
store store.BlobStore
grp *stop.Group
concurrentRequests int
address string
}
// NewServer returns an initialized Server pointer.
func NewServer(store store.BlobStore, requestQueueSize int) *Server {
func NewServer(store store.BlobStore, requestQueueSize int, address string) *Server {
return &Server{
store: store,
grp: stop.New(),
concurrentRequests: requestQueueSize,
address: address,
}
}
@ -66,8 +69,8 @@ type availabilityResponse struct {
}
// Start starts the server listener to handle connections.
func (s *Server) Start(address string) error {
log.Println("HTTP3 peer listening on " + address)
func (s *Server) Start() error {
log.Println("HTTP3 peer listening on " + s.address)
window500M := 500 * 1 << 20
quicConf := &quic.Config{
@ -113,7 +116,7 @@ func (s *Server) Start(address string) error {
}
})
server := http3.Server{
Addr: address,
Addr: s.address,
Handler: r,
TLSConfig: generateTLSConfig(),
QUICConfig: quicConf,
@ -155,7 +158,7 @@ func generateTLSConfig() *tls.Config {
func (s *Server) listenAndServe(server *http3.Server) {
err := server.ListenAndServe()
if err != nil && err != quic.ErrServerClosed {
if err != nil && !strings.Contains(err.Error(), "Server closed") {
log.Errorln(errors.FullTrace(err))
}
}

View file

@ -1,117 +0,0 @@
package http3
import (
"crypto/tls"
"crypto/x509"
"net/http"
"strings"
"sync"
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/quic-go/quic-go"
"github.com/quic-go/quic-go/http3"
)
// Store is a blob store that gets blobs from a peer.
// It satisfies the store.BlobStore interface but cannot put or delete blobs.
type Store struct {
opts StoreOpts
NotFoundCache *sync.Map
}
// StoreOpts allows to set options for a new Store.
type StoreOpts struct {
Address string
Timeout time.Duration
}
// NewStore makes a new peer store.
func NewStore(opts StoreOpts) *Store {
return &Store{opts: opts, NotFoundCache: &sync.Map{}}
}
func (p *Store) getClient() (*Client, error) {
var qconf quic.Config
window500M := 500 * 1 << 20
qconf.MaxStreamReceiveWindow = uint64(window500M)
qconf.MaxConnectionReceiveWindow = uint64(window500M)
qconf.EnableDatagrams = true
qconf.HandshakeIdleTimeout = 4 * time.Second
qconf.MaxIdleTimeout = 20 * time.Second
pool, err := x509.SystemCertPool()
if err != nil {
return nil, err
}
roundTripper := &http3.RoundTripper{
TLSClientConfig: &tls.Config{
RootCAs: pool,
InsecureSkipVerify: true,
},
QUICConfig: &qconf,
}
connection := &http.Client{
Transport: roundTripper,
}
c := &Client{
conn: connection,
roundTripper: roundTripper,
ServerAddr: p.opts.Address,
}
return c, errors.Prefix("connection error", err)
}
func (p *Store) Name() string { return "http3" }
// Has asks the peer if they have a hash
func (p *Store) Has(hash string) (bool, error) {
c, err := p.getClient()
if err != nil {
return false, err
}
defer func() { _ = c.Close() }()
return c.HasBlob(hash)
}
// Get downloads the blob from the peer
func (p *Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
if lastChecked, ok := p.NotFoundCache.Load(hash); ok {
if lastChecked.(time.Time).After(time.Now().Add(-5 * time.Minute)) {
return nil, shared.NewBlobTrace(time.Since(start), p.Name()+"-notfoundcache"), store.ErrBlobNotFound
}
}
c, err := p.getClient()
if err != nil && strings.Contains(err.Error(), "blob not found") {
p.NotFoundCache.Store(hash, time.Now())
}
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), p.Name()), err
}
defer func() { _ = c.Close() }()
return c.GetBlob(hash)
}
// Put is not supported
func (p *Store) Put(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// PutSD is not supported
func (p *Store) PutSD(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// Delete is not supported
func (p *Store) Delete(hash string) error {
return errors.Err(shared.ErrNotImplemented)
}
// Shutdown is not supported
func (p *Store) Shutdown() {
}

View file

@ -31,17 +31,18 @@ const (
// Server is an instance of a peer server that houses the listener and store.
type Server struct {
store store.BlobStore
closed bool
grp *stop.Group
store store.BlobStore
closed bool
grp *stop.Group
address string
}
// NewServer returns an initialized Server pointer.
func NewServer(store store.BlobStore) *Server {
func NewServer(store store.BlobStore, address string) *Server {
return &Server{
store: store,
grp: stop.New(),
store: store,
grp: stop.New(),
address: address,
}
}
@ -53,9 +54,9 @@ func (s *Server) Shutdown() {
}
// Start starts the server listener to handle connections.
func (s *Server) Start(address string) error {
log.Println("peer listening on " + address)
l, err := net.Listen("tcp4", address)
func (s *Server) Start() error {
log.Println("peer listening on " + s.address)
l, err := net.Listen("tcp4", s.address)
if err != nil {
return err
}
@ -306,11 +307,11 @@ func (s *Server) logError(e error) {
}
func readNextMessage(buf *bufio.Reader) ([]byte, error) {
first_byte, err := buf.ReadByte()
firstByte, err := buf.ReadByte()
if err != nil {
return nil, err
}
if first_byte != '{' {
if firstByte != '{' {
// every request starts with '{'. Checking here disconnects earlier, so we don't wait until timeout
return nil, errInvalidData
}

View file

@ -37,7 +37,9 @@ var availabilityRequests = []pair{
}
func getServer(t *testing.T, withBlobs bool) *Server {
st := store.NewMemStore()
st := store.NewMemStore(store.MemParams{
Name: "test",
})
if withBlobs {
for k, v := range blobs {
err := st.Put(k, v)
@ -46,7 +48,7 @@ func getServer(t *testing.T, withBlobs bool) *Server {
}
}
}
return NewServer(st)
return NewServer(st, "127.0.0.1:50505")
}
func TestAvailabilityRequest_NoBlobs(t *testing.T) {
@ -81,7 +83,7 @@ func TestAvailabilityRequest_WithBlobs(t *testing.T) {
func TestRequestFromConnection(t *testing.T) {
s := getServer(t, true)
err := s.Start("127.0.0.1:50505")
err := s.Start()
defer s.Shutdown()
if err != nil {
t.Error("error starting server", err)
@ -111,12 +113,12 @@ func TestRequestFromConnection(t *testing.T) {
func TestInvalidData(t *testing.T) {
s := getServer(t, true)
err := s.Start("127.0.0.1:50503")
err := s.Start()
defer s.Shutdown()
if err != nil {
t.Error("error starting server", err)
}
conn, err := net.Dial("tcp", "127.0.0.1:50503")
conn, err := net.Dial("tcp", "127.0.0.1:50505")
if err != nil {
t.Error("error opening connection", err)
}

View file

@ -1,82 +0,0 @@
package peer
import (
"strings"
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
)
// Store is a blob store that gets blobs from a peer.
// It satisfies the store.BlobStore interface but cannot put or delete blobs.
type Store struct {
opts StoreOpts
}
// StoreOpts allows to set options for a new Store.
type StoreOpts struct {
Address string
Timeout time.Duration
}
// NewStore makes a new peer store.
func NewStore(opts StoreOpts) *Store {
return &Store{opts: opts}
}
func (p *Store) getClient() (*Client, error) {
c := &Client{Timeout: p.opts.Timeout}
err := c.Connect(p.opts.Address)
return c, errors.Prefix("connection error", err)
}
func (p *Store) Name() string { return "peer" }
// Has asks the peer if they have a hash
func (p *Store) Has(hash string) (bool, error) {
c, err := p.getClient()
if err != nil {
return false, err
}
defer func() { _ = c.Close() }()
return c.HasBlob(hash)
}
// Get downloads the blob from the peer
func (p *Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
c, err := p.getClient()
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), p.Name()), err
}
defer func() { _ = c.Close() }()
blob, trace, err := c.GetBlob(hash)
if err != nil && strings.Contains(err.Error(), "blob not found") {
return nil, trace, store.ErrBlobNotFound
}
return blob, trace, err
}
// Put is not supported
func (p *Store) Put(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// PutSD is not supported
func (p *Store) PutSD(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// Delete is not supported
func (p *Store) Delete(hash string) error {
return errors.Err(shared.ErrNotImplemented)
}
// Shutdown is not supported
func (p *Store) Shutdown() {
}

14
server/server.go Normal file
View file

@ -0,0 +1,14 @@
package server
// BlobServer defines the common interface for all blob server implementations
type BlobServer interface {
Start() error
Shutdown()
}
type BlobServerConfig struct {
Address string `mapstructure:"address"`
Port int `mapstructure:"port"`
MaxConcurrentRequests int `mapstructure:"max_concurrent_requests"`
EdgeToken string `mapstructure:"edge_token"`
}

View file

@ -1,6 +1,7 @@
package store
import (
"strings"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
@ -10,6 +11,7 @@ import (
"github.com/lbryio/lbry.go/v2/stream"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
// CachingStore combines two stores, typically a local and a remote store, to improve performance.
@ -17,20 +19,77 @@ import (
// are retrieved from the origin and cached. Puts are cached and also forwarded to the origin.
type CachingStore struct {
origin, cache BlobStore
component string
name string
}
type CachingParams struct {
Name string `mapstructure:"name"`
Origin BlobStore `mapstructure:"origin"`
Cache BlobStore `mapstructure:"cache"`
}
type CachingConfig struct {
Name string `mapstructure:"name"`
Origin *viper.Viper
Cache *viper.Viper
}
// NewCachingStore makes a new caching disk store and returns a pointer to it.
func NewCachingStore(component string, origin, cache BlobStore) *CachingStore {
func NewCachingStore(params CachingParams) *CachingStore {
return &CachingStore{
component: component,
origin: WithSingleFlight(component, origin),
cache: WithSingleFlight(component, cache),
name: params.Name,
origin: WithSingleFlight(params.Name, params.Origin),
cache: WithSingleFlight(params.Name, params.Cache),
}
}
const nameCaching = "caching"
func CachingStoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg CachingConfig
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
cfg.Cache = config.Sub("cache")
cfg.Origin = config.Sub("origin")
if cfg.Cache == nil || cfg.Origin == nil {
return nil, errors.Err("cache and origin missing")
}
originStoreType := strings.Split(cfg.Origin.AllKeys()[0], ".")[0]
originStoreConfig := cfg.Origin.Sub(originStoreType)
factory, ok := Factories[originStoreType]
if !ok {
return nil, errors.Err("unknown store type %s", originStoreType)
}
originStore, err := factory(originStoreConfig)
if err != nil {
return nil, errors.Err(err)
}
cacheStoreType := strings.Split(cfg.Cache.AllKeys()[0], ".")[0]
cacheStoreConfig := cfg.Cache.Sub(cacheStoreType)
factory, ok = Factories[cacheStoreType]
if !ok {
return nil, errors.Err("unknown store type %s", cacheStoreType)
}
cacheStore, err := factory(cacheStoreConfig)
if err != nil {
return nil, errors.Err(err)
}
return NewCachingStore(CachingParams{
Name: cfg.Name,
Origin: originStore,
Cache: cacheStore,
}), nil
}
func init() {
RegisterStore(nameCaching, CachingStoreFactory)
}
// Name is the cache type name
func (c *CachingStore) Name() string { return nameCaching }
@ -49,17 +108,17 @@ func (c *CachingStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
blob, trace, err := c.cache.Get(hash)
if err == nil || !errors.Is(err, ErrBlobNotFound) {
metrics.CacheHitCount.With(metrics.CacheLabels(c.cache.Name(), c.component)).Inc()
metrics.CacheHitCount.With(metrics.CacheLabels(c.cache.Name(), c.name)).Inc()
rate := float64(len(blob)) / 1024 / 1024 / time.Since(start).Seconds()
metrics.CacheRetrievalSpeed.With(map[string]string{
metrics.LabelCacheType: c.cache.Name(),
metrics.LabelComponent: c.component,
metrics.LabelComponent: c.name,
metrics.LabelSource: "cache",
}).Set(rate)
return blob, trace.Stack(time.Since(start), c.Name()), err
}
metrics.CacheMissCount.With(metrics.CacheLabels(c.cache.Name(), c.component)).Inc()
metrics.CacheMissCount.With(metrics.CacheLabels(c.cache.Name(), c.name)).Inc()
blob, trace, err = c.origin.Get(hash)
if err != nil {

View file

@ -13,9 +13,9 @@ import (
)
func TestCachingStore_Put(t *testing.T) {
origin := NewMemStore()
cache := NewMemStore()
s := NewCachingStore("test", origin, cache)
origin := NewMemStore(MemParams{Name: "test"})
cache := NewMemStore(MemParams{Name: "test"})
s := NewCachingStore(CachingParams{Name: "test", Origin: origin, Cache: cache})
b := []byte("this is a blob of stuff")
hash := "hash"
@ -43,9 +43,9 @@ func TestCachingStore_Put(t *testing.T) {
}
func TestCachingStore_CacheMiss(t *testing.T) {
origin := NewMemStore()
cache := NewMemStore()
s := NewCachingStore("test", origin, cache)
origin := NewMemStore(MemParams{Name: "test"})
cache := NewMemStore(MemParams{Name: "test"})
s := NewCachingStore(CachingParams{Name: "test", Origin: origin, Cache: cache})
b := []byte("this is a blob of stuff")
hash := "hash"
@ -85,8 +85,8 @@ func TestCachingStore_CacheMiss(t *testing.T) {
func TestCachingStore_ThunderingHerd(t *testing.T) {
storeDelay := 100 * time.Millisecond
origin := NewSlowBlobStore(storeDelay)
cache := NewMemStore()
s := NewCachingStore("test", origin, cache)
cache := NewMemStore(MemParams{Name: "test"})
s := NewCachingStore(CachingParams{Name: "test", Origin: origin, Cache: cache})
b := []byte("this is a blob of stuff")
hash := "hash"
@ -141,7 +141,7 @@ type SlowBlobStore struct {
func NewSlowBlobStore(delay time.Duration) *SlowBlobStore {
return &SlowBlobStore{
mem: NewMemStore(),
mem: NewMemStore(MemParams{Name: "test"}),
delay: delay,
}
}
@ -174,6 +174,4 @@ func (s *SlowBlobStore) Delete(hash string) error {
return s.mem.Delete(hash)
}
func (s *SlowBlobStore) Shutdown() {
return
}
func (s *SlowBlobStore) Shutdown() {}

View file

@ -1,108 +0,0 @@
package store
import (
"io"
"net/http"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/meta"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
log "github.com/sirupsen/logrus"
)
// CloudFrontROStore reads from cloudfront. All writes panic.
type CloudFrontROStore struct {
endpoint string // cloudflare endpoint
}
// NewCloudFrontROStore returns an initialized CloudFrontROStore store pointer.
func NewCloudFrontROStore(endpoint string) *CloudFrontROStore {
return &CloudFrontROStore{endpoint: endpoint}
}
const nameCloudFrontRO = "cloudfront_ro"
// Name is the cache type name
func (c *CloudFrontROStore) Name() string { return nameCloudFrontRO }
// Has checks if the hash is in the store.
func (c *CloudFrontROStore) Has(hash string) (bool, error) {
status, body, err := c.cfRequest(http.MethodHead, hash)
if err != nil {
return false, err
}
defer func() { _ = body.Close() }()
switch status {
case http.StatusNotFound, http.StatusForbidden:
return false, nil
case http.StatusOK:
return true, nil
default:
return false, errors.Err("unexpected status %d", status)
}
}
// Get gets the blob from Cloudfront.
func (c *CloudFrontROStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
log.Debugf("Getting %s from S3", hash[:8])
start := time.Now()
defer func(t time.Time) {
log.Debugf("Getting %s from S3 took %s", hash[:8], time.Since(t).String())
}(start)
status, body, err := c.cfRequest(http.MethodGet, hash)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), c.Name()), err
}
defer func() { _ = body.Close() }()
switch status {
case http.StatusNotFound, http.StatusForbidden:
return nil, shared.NewBlobTrace(time.Since(start), c.Name()), errors.Err(ErrBlobNotFound)
case http.StatusOK:
b, err := io.ReadAll(body)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), c.Name()), errors.Err(err)
}
metrics.MtrInBytesS3.Add(float64(len(b)))
return b, shared.NewBlobTrace(time.Since(start), c.Name()), nil
default:
return nil, shared.NewBlobTrace(time.Since(start), c.Name()), errors.Err("unexpected status %d", status)
}
}
func (c *CloudFrontROStore) cfRequest(method, hash string) (int, io.ReadCloser, error) {
url := c.endpoint + hash
req, err := http.NewRequest(method, url, nil)
if err != nil {
return 0, nil, errors.Err(err)
}
req.Header.Add("User-Agent", "reflector.go/"+meta.Version())
res, err := http.DefaultClient.Do(req)
if err != nil {
return 0, nil, errors.Err(err)
}
return res.StatusCode, res.Body, nil
}
func (c *CloudFrontROStore) Put(_ string, _ stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
func (c *CloudFrontROStore) PutSD(_ string, _ stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
func (c *CloudFrontROStore) Delete(_ string) error {
return errors.Err(shared.ErrNotImplemented)
}
// Shutdown shuts down the store gracefully
func (c *CloudFrontROStore) Shutdown() {
}

View file

@ -1,62 +0,0 @@
package store
import (
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/stream"
)
// CloudFrontRWStore combines a Cloudfront and an S3 store. Reads go to Cloudfront/Wasabi, writes go to S3.
type CloudFrontRWStore struct {
cf *ITTTStore
s3 *S3Store
}
// NewCloudFrontRWStore returns an initialized CloudFrontRWStore store pointer.
// NOTE: It panics if either argument is nil.
func NewCloudFrontRWStore(cf *ITTTStore, s3 *S3Store) *CloudFrontRWStore {
if cf == nil || s3 == nil {
panic("both stores must be set")
}
return &CloudFrontRWStore{cf: cf, s3: s3}
}
const nameCloudFrontRW = "cloudfront_rw"
// Name is the cache type name
func (c *CloudFrontRWStore) Name() string { return nameCloudFrontRW }
// Has checks if the hash is in the store.
func (c *CloudFrontRWStore) Has(hash string) (bool, error) {
return c.cf.Has(hash)
}
// Get gets the blob from Cloudfront.
func (c *CloudFrontRWStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
blob, trace, err := c.cf.Get(hash)
return blob, trace.Stack(time.Since(start), c.Name()), err
}
// Put stores the blob on S3
func (c *CloudFrontRWStore) Put(hash string, blob stream.Blob) error {
return c.s3.Put(hash, blob)
}
// PutSD stores the sd blob on S3
func (c *CloudFrontRWStore) PutSD(hash string, blob stream.Blob) error {
return c.s3.PutSD(hash, blob)
}
// Delete deletes the blob from S3
func (c *CloudFrontRWStore) Delete(hash string) error {
return c.s3.Delete(hash)
}
// Shutdown shuts down the store gracefully
func (c *CloudFrontRWStore) Shutdown() {
c.s3.Shutdown()
c.cf.Shutdown()
}

View file

@ -2,16 +2,21 @@ package store
import (
"encoding/json"
"fmt"
"strings"
"sync"
"time"
"github.com/c2h5oh/datasize"
"github.com/lbryio/reflector.go/db"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/extras/stop"
"github.com/lbryio/lbry.go/v2/stream"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
// DBBackedStore is a store that's backed by a DB. The DB contains data about what's in the store.
@ -21,17 +26,55 @@ type DBBackedStore struct {
blockedMu sync.RWMutex
blocked map[string]bool
deleteOnMiss bool
maxSize int
cleanerStop *stop.Group
name string
}
type DBBackedParams struct {
Name string `mapstructure:"name"`
Store BlobStore `mapstructure:"store"`
DB *db.SQL `mapstructure:"db"`
DeleteOnMiss bool `mapstructure:"delete_on_miss"`
MaxSize *int `mapstructure:"max_size"`
}
type DBBackedConfig struct {
Name string `mapstructure:"name"`
Store *viper.Viper
User string `mapstructure:"user"`
Password string `mapstructure:"password"`
Host string `mapstructure:"host"`
Port int `mapstructure:"port"`
Database string `mapstructure:"database"`
DeleteOnMiss bool `mapstructure:"delete_on_miss"`
AccessTracking int `mapstructure:"access_tracking"`
SoftDeletes bool `mapstructure:"soft_deletes"`
LogQueries bool `mapstructure:"log_queries"`
HasCap bool `mapstructure:"has_cap"`
MaxSize string `mapstructure:"max_size"`
}
// NewDBBackedStore returns an initialized store pointer.
func NewDBBackedStore(blobs BlobStore, db *db.SQL, deleteOnMiss bool) *DBBackedStore {
return &DBBackedStore{blobs: blobs, db: db, deleteOnMiss: deleteOnMiss}
func NewDBBackedStore(params DBBackedParams) *DBBackedStore {
store := &DBBackedStore{
blobs: params.Store,
db: params.DB,
deleteOnMiss: params.DeleteOnMiss,
cleanerStop: stop.New(),
name: params.Name,
}
if params.MaxSize != nil {
store.maxSize = *params.MaxSize
go store.cleanOldestBlobs()
}
return store
}
const nameDBBacked = "db-backed"
const nameDBBacked = "db_backed"
// Name is the cache type name
func (d *DBBackedStore) Name() string { return nameDBBacked }
func (d *DBBackedStore) Name() string { return nameDBBacked + "-" + d.name }
// Has returns true if the blob is in the store
func (d *DBBackedStore) Has(hash string) (bool, error) {
@ -101,34 +144,18 @@ func (d *DBBackedStore) Delete(hash string) error {
// Block deletes the blob and prevents it from being uploaded in the future
func (d *DBBackedStore) Block(hash string) error {
if blocked, err := d.isBlocked(hash); blocked || err != nil {
blocked, err := d.isBlocked(hash)
if blocked || err != nil {
return err
}
log.Debugf("blocking %s", hash)
err := d.db.Block(hash)
err = d.db.Block(hash)
if err != nil {
return err
}
//has, err := d.db.HasBlob(hash, false)
//if err != nil {
// return err
//}
//
//if has {
// err = d.blobs.Delete(hash)
// if err != nil {
// return err
// }
//
// err = d.db.Delete(hash)
// if err != nil {
// return err
// }
//}
return d.markBlocked(hash)
}
@ -195,7 +222,132 @@ func (d *DBBackedStore) initBlocked() error {
return err
}
// cleanOldestBlobs periodically cleans up the oldest blobs if maxSize is set
func (d *DBBackedStore) cleanOldestBlobs() {
// Run on startup without waiting for 10 minutes
err := d.doClean()
if err != nil {
log.Error(errors.FullTrace(err))
}
const cleanupInterval = 10 * time.Minute
for {
select {
case <-d.cleanerStop.Ch():
log.Infoln("stopping self cleanup")
return
case <-time.After(cleanupInterval):
err := d.doClean()
if err != nil {
log.Error(errors.FullTrace(err))
}
}
}
}
// doClean removes the least recently accessed blobs if the store exceeds maxSize
func (d *DBBackedStore) doClean() error {
blobsCount, err := d.db.Count()
if err != nil {
return err
}
if blobsCount >= d.maxSize {
itemsToDelete := blobsCount / 10
blobs, err := d.db.LeastRecentlyAccessedHashes(itemsToDelete)
if err != nil {
return err
}
blobsChan := make(chan string, len(blobs))
wg := stop.New()
go func() {
for _, hash := range blobs {
select {
case <-d.cleanerStop.Ch():
return
default:
}
blobsChan <- hash
}
close(blobsChan)
}()
for i := 0; i < 3; i++ {
wg.Add(1)
go func() {
defer wg.Done()
for h := range blobsChan {
select {
case <-d.cleanerStop.Ch():
return
default:
}
err = d.Delete(h)
if err != nil {
log.Errorf("error pruning %s: %s", h, errors.FullTrace(err))
continue
}
}
}()
}
wg.Wait()
}
return nil
}
// Shutdown shuts down the store gracefully
func (d *DBBackedStore) Shutdown() {
d.cleanerStop.Stop()
d.blobs.Shutdown()
}
func DBBackedStoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg DBBackedConfig
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
cfg.Store = config.Sub("store")
storeType := strings.Split(cfg.Store.AllKeys()[0], ".")[0]
storeConfig := cfg.Store.Sub(storeType)
factory, ok := Factories[storeType]
if !ok {
return nil, errors.Err("unknown store type %s", storeType)
}
underlyingStore, err := factory(storeConfig)
if err != nil {
return nil, errors.Err(err)
}
parsedDb := &db.SQL{
TrackingLevel: db.AccessTrackingLevel(cfg.AccessTracking),
SoftDelete: cfg.SoftDeletes,
LogQueries: cfg.LogQueries || log.GetLevel() == log.DebugLevel,
}
err = parsedDb.Connect(fmt.Sprintf("%s:%s@tcp(%s:%d)/%s", cfg.User, cfg.Password, cfg.Host, cfg.Port, cfg.Database))
if err != nil {
return nil, err
}
params := DBBackedParams{
Name: cfg.Name,
Store: underlyingStore,
DB: parsedDb,
DeleteOnMiss: cfg.DeleteOnMiss,
}
if cfg.HasCap {
var parsedSize datasize.ByteSize
err = parsedSize.UnmarshalText([]byte(cfg.MaxSize))
if err != nil {
return nil, errors.Err(err)
}
maxSize := int(float64(parsedSize) / float64(stream.MaxBlobSize))
params.MaxSize = &maxSize
}
return NewDBBackedStore(params), nil
}
func init() {
RegisterStore(nameDBBacked, DBBackedStoreFactory)
}

View file

@ -7,6 +7,7 @@ import (
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/reflector.go/store/speedwalk"
"github.com/spf13/viper"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
@ -14,27 +15,47 @@ import (
// DiskStore stores blobs on a local disk
type DiskStore struct {
// the location of blobs on disk
blobDir string
// store files in subdirectories based on the first N chars in the filename. 0 = don't create subdirectories.
prefixLength int
name string
// true if initOnce ran, false otherwise
initialized bool
}
type DiskParams struct {
Name string `mapstructure:"name"`
MountPoint string `mapstructure:"mount_point"`
ShardingSize int `mapstructure:"sharding_size"`
}
// NewDiskStore returns an initialized file disk store pointer.
func NewDiskStore(dir string, prefixLength int) *DiskStore {
func NewDiskStore(params DiskParams) *DiskStore {
return &DiskStore{
blobDir: dir,
prefixLength: prefixLength,
blobDir: params.MountPoint,
prefixLength: params.ShardingSize,
name: params.Name,
}
}
const nameDisk = "disk"
func DiskStoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg DiskParams
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
return NewDiskStore(cfg), nil
}
func init() {
RegisterStore(nameDisk, DiskStoreFactory)
}
// Name is the cache type name
func (d *DiskStore) Name() string { return nameDisk }
func (d *DiskStore) Name() string { return nameDisk + "-" + d.name }
// Has returns whether the blob is already stored. It will error with any IO disk error.
func (d *DiskStore) Has(hash string) (bool, error) {
@ -111,14 +132,14 @@ func (d *DiskStore) dir(hash string) string {
}
return path.Join(d.blobDir, hash[:d.prefixLength])
}
func (d *DiskStore) tmpDir(hash string) string {
func (d *DiskStore) tmpDir() string {
return path.Join(d.blobDir, "tmp")
}
func (d *DiskStore) path(hash string) string {
return path.Join(d.dir(hash), hash)
}
func (d *DiskStore) tmpPath(hash string) string {
return path.Join(d.tmpDir(hash), hash)
return path.Join(d.tmpDir(), hash)
}
func (d *DiskStore) ensureDirExists(dir string) error {
return errors.Err(os.MkdirAll(dir, 0755))
@ -133,7 +154,7 @@ func (d *DiskStore) initOnce() error {
if err != nil {
return err
}
err = d.ensureDirExists(path.Join(d.blobDir, "tmp"))
err = d.ensureDirExists(d.tmpDir())
if err != nil {
return err
}

View file

@ -16,7 +16,11 @@ func TestDiskStore_Get(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "reflector_test_*")
require.NoError(t, err)
defer func() { _ = os.RemoveAll(tmpDir) }()
d := NewDiskStore(tmpDir, 2)
d := NewDiskStore(DiskParams{
Name: "test",
MountPoint: tmpDir,
ShardingSize: 2,
})
hash := "f428b8265d65dad7f8ffa52922bba836404cbd62f3ecfe10adba6b444f8f658938e54f5981ac4de39644d5b93d89a94b"
data := []byte("oyuntyausntoyaunpdoyruoyduanrstjwfjyuwf")
@ -36,7 +40,11 @@ func TestDiskStore_GetNonexistentBlob(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "reflector_test_*")
require.NoError(t, err)
defer func() { _ = os.RemoveAll(tmpDir) }()
d := NewDiskStore(tmpDir, 2)
d := NewDiskStore(DiskParams{
Name: "test",
MountPoint: tmpDir,
ShardingSize: 2,
})
blob, _, err := d.Get("nonexistent")
assert.Nil(t, blob)

View file

@ -1,6 +1,7 @@
package store
import (
"strings"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
@ -11,15 +12,16 @@ import (
"github.com/bluele/gcache"
"github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
// GcacheStore adds a max cache size and Greedy-Dual-Size-Frequency cache eviction strategy to a BlobStore
type GcacheStore struct {
// underlying store
store BlobStore
// cache implementation
cache gcache.Cache
underlyingStore BlobStore
cache gcache.Cache
name string
}
type EvictionStrategy int
const (
@ -33,16 +35,30 @@ const (
SIMPLE
)
type GcacheParams struct {
Name string `mapstructure:"name"`
Store BlobStore `mapstructure:"store"`
MaxSize int `mapstructure:"max_size"`
Strategy EvictionStrategy `mapstructure:"strategy"`
}
type GcacheConfig struct {
Name string `mapstructure:"name"`
Store *viper.Viper
MaxSize int `mapstructure:"max_size"`
Strategy EvictionStrategy `mapstructure:"strategy"`
}
// NewGcacheStore initializes a new GcacheStore
func NewGcacheStore(component string, store BlobStore, maxSize int, strategy EvictionStrategy) *GcacheStore {
cacheBuilder := gcache.New(maxSize)
func NewGcacheStore(params GcacheParams) *GcacheStore {
cacheBuilder := gcache.New(params.MaxSize)
var cache gcache.Cache
evictFunc := func(key interface{}, value interface{}) {
logrus.Infof("evicting %s", key)
metrics.CacheLRUEvictCount.With(metrics.CacheLabels(store.Name(), component)).Inc()
_ = store.Delete(key.(string)) // TODO: log this error. may happen if underlying entry is gone but cache entry still there
metrics.CacheLRUEvictCount.With(metrics.CacheLabels(params.Store.Name(), params.Name)).Inc()
_ = params.Store.Delete(key.(string)) // TODO: log this error. may happen if underlying entry is gone but cache entry still there
}
switch strategy {
switch params.Strategy {
case LFU:
cache = cacheBuilder.LFU().EvictedFunc(evictFunc).Build()
case ARC:
@ -51,15 +67,15 @@ func NewGcacheStore(component string, store BlobStore, maxSize int, strategy Evi
cache = cacheBuilder.LRU().EvictedFunc(evictFunc).Build()
case SIMPLE:
cache = cacheBuilder.Simple().EvictedFunc(evictFunc).Build()
}
l := &GcacheStore{
store: store,
cache: cache,
underlyingStore: params.Store,
cache: cache,
name: params.Name,
}
go func() {
if lstr, ok := store.(lister); ok {
err := l.loadExisting(lstr, maxSize)
if lstr, ok := params.Store.(lister); ok {
err := l.loadExisting(lstr, params.MaxSize)
if err != nil {
panic(err) // TODO: what should happen here? panic? return nil? just keep going?
}
@ -71,8 +87,40 @@ func NewGcacheStore(component string, store BlobStore, maxSize int, strategy Evi
const nameGcache = "gcache"
func GcacheStoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg GcacheConfig
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
cfg.Store = config.Sub("store")
storeType := strings.Split(cfg.Store.AllKeys()[0], ".")[0]
storeConfig := cfg.Store.Sub(storeType)
factory, ok := Factories[storeType]
if !ok {
return nil, errors.Err("unknown store type %s", storeType)
}
underlyingStore, err := factory(storeConfig)
if err != nil {
return nil, errors.Err(err)
}
return NewGcacheStore(GcacheParams{
Name: cfg.Name,
Store: underlyingStore,
MaxSize: cfg.MaxSize,
Strategy: cfg.Strategy,
}), nil
}
func init() {
RegisterStore(nameGcache, GcacheStoreFactory)
}
// Name is the cache type name
func (l *GcacheStore) Name() string { return nameGcache }
func (l *GcacheStore) Name() string { return nameGcache + "-" + l.name }
// Has returns whether the blob is in the store, without updating the recent-ness.
func (l *GcacheStore) Has(hash string) (bool, error) {
@ -86,7 +134,7 @@ func (l *GcacheStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), l.Name()), errors.Err(ErrBlobNotFound)
}
blob, stack, err := l.store.Get(hash)
blob, stack, err := l.underlyingStore.Get(hash)
if errors.Is(err, ErrBlobNotFound) {
// Blob disappeared from underlying store
l.cache.Remove(hash)
@ -99,7 +147,7 @@ func (l *GcacheStore) Put(hash string, blob stream.Blob) error {
_ = l.cache.Set(hash, true)
has, _ := l.Has(hash)
if has {
err := l.store.Put(hash, blob)
err := l.underlyingStore.Put(hash, blob)
if err != nil {
return err
}
@ -112,7 +160,7 @@ func (l *GcacheStore) PutSD(hash string, blob stream.Blob) error {
_ = l.cache.Set(hash, true)
has, _ := l.Has(hash)
if has {
err := l.store.PutSD(hash, blob)
err := l.underlyingStore.PutSD(hash, blob)
if err != nil {
return err
}
@ -122,7 +170,7 @@ func (l *GcacheStore) PutSD(hash string, blob stream.Blob) error {
// Delete deletes the blob from the store
func (l *GcacheStore) Delete(hash string) error {
err := l.store.Delete(hash)
err := l.underlyingStore.Delete(hash)
if err != nil {
return err
}

View file

@ -16,8 +16,8 @@ import (
const cacheMaxSize = 3
func getTestGcacheStore() (*GcacheStore, *MemStore) {
m := NewMemStore()
return NewGcacheStore("test", m, cacheMaxSize, LFU), m
m := NewMemStore(MemParams{Name: "test"})
return NewGcacheStore(GcacheParams{Name: "test", Store: m, MaxSize: cacheMaxSize, Strategy: LFU}), m
}
func TestGcacheStore_Eviction(t *testing.T) {
@ -90,7 +90,11 @@ func TestGcacheStore_loadExisting(t *testing.T) {
tmpDir, err := os.MkdirTemp("", "reflector_test_*")
require.NoError(t, err)
defer func() { _ = os.RemoveAll(tmpDir) }()
d := NewDiskStore(tmpDir, 2)
d := NewDiskStore(DiskParams{
Name: "test",
MountPoint: tmpDir,
ShardingSize: 2,
})
hash := "hash"
b := []byte("this is a blob of stuff")
@ -102,8 +106,8 @@ func TestGcacheStore_loadExisting(t *testing.T) {
require.Equal(t, 1, len(existing), "blob should exist in cache")
assert.Equal(t, hash, existing[0])
lfu := NewGcacheStore("test", d, 3, LFU) // lru should load existing blobs when it's created
time.Sleep(100 * time.Millisecond) // async load so let's wait...
lfu := NewGcacheStore(GcacheParams{Name: "test", Store: d, MaxSize: 3, Strategy: LFU}) // lru should load existing blobs when it's created
time.Sleep(100 * time.Millisecond) // async load so let's wait...
has, err := lfu.Has(hash)
require.NoError(t, err)
assert.True(t, has, "hash should be loaded from disk store but it's not")

View file

@ -1,170 +1,188 @@
package store
import (
"bytes"
"context"
"io"
"net"
"net/http"
"sync"
"path"
"strconv"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/meta"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
// HttpStore is a store that works on top of the HTTP protocol
// HttpStore reads from an HTTP endpoint that simply expects the hash to be appended to the endpoint
type HttpStore struct {
upstream string
httpClient *http.Client
edgeToken string
endpoint string
httpClient *http.Client
prefixLength int
name string
}
func NewHttpStore(upstream, edgeToken string) *HttpStore {
type HttpParams struct {
Name string `mapstructure:"name"`
Endpoint string `mapstructure:"endpoint"`
ShardingSize int `mapstructure:"sharding_size"`
}
// NewHttpStore returns an initialized HttpStore store pointer.
func NewHttpStore(params HttpParams) *HttpStore {
return &HttpStore{
upstream: "http://" + upstream,
httpClient: getClient(),
edgeToken: edgeToken,
endpoint: params.Endpoint,
httpClient: getClient(),
prefixLength: params.ShardingSize,
name: params.Name,
}
}
const nameHttp = "http"
func (n *HttpStore) Name() string { return nameHttp }
func (n *HttpStore) Has(hash string) (bool, error) {
url := n.upstream + "/blob?hash=" + hash
req, err := http.NewRequest("HEAD", url, nil)
func HttpStoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg HttpParams
err := config.Unmarshal(&cfg)
if err != nil {
return false, errors.Err(err)
return nil, errors.Err(err)
}
return NewHttpStore(cfg), nil
}
res, err := n.httpClient.Do(req)
// Name is the cache type name
func (c *HttpStore) Name() string { return nameHttp + "-" + c.name }
// Has checks if the hash is in the store.
func (c *HttpStore) Has(hash string) (bool, error) {
status, body, err := c.cfRequest(http.MethodHead, hash)
if err != nil {
return false, errors.Err(err)
return false, err
}
defer func() { _ = res.Body.Close() }()
if res.StatusCode == http.StatusNotFound {
defer func() { _ = body.Close() }()
switch status {
case http.StatusNotFound, http.StatusForbidden:
return false, nil
}
if res.StatusCode == http.StatusNoContent {
case http.StatusOK:
return true, nil
default:
return false, errors.Err("unexpected status %d", status)
}
var body []byte
if res.Body != nil {
body, _ = io.ReadAll(res.Body)
}
return false, errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))
}
func (n *HttpStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
// Get downloads the blob using the http client
func (c *HttpStore) Get(hash string) (b stream.Blob, trace shared.BlobTrace, err error) {
log.Debugf("Getting %s from HTTP(s) source", hash[:8])
start := time.Now()
url := n.upstream + "/blob?hash=" + hash
if n.edgeToken != "" {
url += "&edge_token=" + n.edgeToken
}
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), n.Name()), errors.Err(err)
}
defer func(t time.Time) {
log.Debugf("Getting %s from HTTP(s) source took %s", hash[:8], time.Since(t).String())
trace = trace.Stack(time.Since(start), c.Name())
}(start)
res, err := n.httpClient.Do(req)
url := c.endpoint + c.shardedPath(hash)
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), n.Name()), errors.Err(err)
return nil, trace, errors.Err(err)
}
defer func() { _ = res.Body.Close() }()
tmp := getBuffer()
defer putBuffer(tmp)
serialized := res.Header.Get("Via")
trace := shared.NewBlobTrace(time.Since(start), n.Name())
if serialized != "" {
parsedTrace, err := shared.Deserialize(serialized)
req.Header.Add("User-Agent", "reflector.go/"+meta.Version())
res, err := c.httpClient.Do(req)
if err != nil {
return nil, trace, errors.Err(err)
}
defer func(Body io.ReadCloser) {
err := Body.Close()
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), n.Name()), err
log.Errorf("Error closing response body in HTTP-GET: %s", err.Error())
}
}(res.Body)
// Parse Via header if present
viaHeader := res.Header.Get("Via")
if viaHeader != "" {
parsedTrace, err := shared.Deserialize(viaHeader)
if err == nil {
trace = *parsedTrace
}
trace = *parsedTrace
}
if res.StatusCode == http.StatusNotFound {
return nil, trace.Stack(time.Since(start), n.Name()), ErrBlobNotFound
}
if res.StatusCode == http.StatusOK {
written, err := io.Copy(tmp, res.Body)
if err != nil {
return nil, trace.Stack(time.Since(start), n.Name()), errors.Err(err)
switch res.StatusCode {
case http.StatusNotFound:
return nil, trace, ErrBlobNotFound
case http.StatusOK:
contentLength := res.Header.Get("Content-Length")
if contentLength != "" {
size, err := strconv.Atoi(contentLength)
if err == nil && size > 0 && size <= stream.MaxBlobSize {
blob := make([]byte, size)
_, err = io.ReadFull(res.Body, blob)
if err == nil {
metrics.MtrInBytesHttp.Add(float64(size))
return blob, trace, nil
}
log.Warnf("Error reading body with known size: %s", err.Error())
}
}
blob := make([]byte, written)
copy(blob, tmp.Bytes())
buffer := getBuffer()
defer putBuffer(buffer)
if _, err := io.Copy(buffer, res.Body); err != nil {
return nil, trace, errors.Err(err)
}
blob := make([]byte, buffer.Len())
copy(blob, buffer.Bytes())
metrics.MtrInBytesHttp.Add(float64(len(blob)))
return blob, trace.Stack(time.Since(start), n.Name()), nil
return blob, trace, nil
default:
body, _ := io.ReadAll(res.Body)
log.Warnf("Got status code %d (%s)", res.StatusCode, string(body))
return nil, trace, errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))
}
var body []byte
if res.Body != nil {
body, _ = io.ReadAll(res.Body)
}
func (c *HttpStore) cfRequest(method, hash string) (int, io.ReadCloser, error) {
url := c.endpoint + c.shardedPath(hash)
req, err := http.NewRequest(method, url, nil)
if err != nil {
return 0, nil, errors.Err(err)
}
req.Header.Add("User-Agent", "reflector.go/"+meta.Version())
res, err := c.httpClient.Do(req)
if err != nil {
return 0, nil, errors.Err(err)
}
return nil, trace.Stack(time.Since(start), n.Name()), errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))
return res.StatusCode, res.Body, nil
}
func (n *HttpStore) Put(string, stream.Blob) error {
return shared.ErrNotImplemented
}
func (n *HttpStore) PutSD(string, stream.Blob) error {
return shared.ErrNotImplemented
}
func (n *HttpStore) Delete(string) error {
return shared.ErrNotImplemented
}
func (n *HttpStore) Shutdown() {}
// buffer pool to reduce GC
// https://www.captaincodeman.com/2017/06/02/golang-buffer-pool-gotcha
var buffers = sync.Pool{
// New is called when a new instance is needed
New: func() interface{} {
buf := make([]byte, 0, stream.MaxBlobSize)
return bytes.NewBuffer(buf)
},
func (c *HttpStore) Put(_ string, _ stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// getBuffer fetches a buffer from the pool
func getBuffer() *bytes.Buffer {
return buffers.Get().(*bytes.Buffer)
func (c *HttpStore) PutSD(_ string, _ stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// putBuffer returns a buffer to the pool
func putBuffer(buf *bytes.Buffer) {
buf.Reset()
buffers.Put(buf)
func (c *HttpStore) Delete(_ string) error {
return errors.Err(shared.ErrNotImplemented)
}
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
dialer := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
// Shutdown shuts down the store gracefully
func (c *HttpStore) Shutdown() {
}
func (c *HttpStore) shardedPath(hash string) string {
if c.prefixLength <= 0 || len(hash) < c.prefixLength {
return hash
}
return dialer.DialContext(ctx, network, address)
return path.Join(hash[:c.prefixLength], hash)
}
// getClient gets an http client that's customized to be more performant when dealing with blobs of 2MB in size (most of our blobs)
func getClient() *http.Client {
// Customize the Transport to have larger connection pool
defaultTransport := &http.Transport{
DialContext: dialContext,
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
DisableCompression: true,
MaxIdleConnsPerHost: 100,
ReadBufferSize: stream.MaxBlobSize + 1024*10, //add an extra few KBs to make sure it fits the extra information
}
return &http.Client{Transport: defaultTransport}
func init() {
RegisterStore(nameHttp, HttpStoreFactory)
}

135
store/http3.go Normal file
View file

@ -0,0 +1,135 @@
package store
import (
"strings"
"sync"
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/spf13/viper"
)
// Http3Store is a blob store that gets blobs from a peer over HTTP3.
// It satisfies the BlobStore interface but cannot put or delete blobs.
type Http3Store struct {
NotFoundCache *sync.Map
name string
address string
timeout time.Duration
client *Http3Client
clientMu sync.RWMutex
}
// Http3Params allows setting options for a new Http3Store.
type Http3Params struct {
Name string `mapstructure:"name"`
Address string `mapstructure:"address"`
Timeout time.Duration `mapstructure:"timeout"`
}
// NewHttp3Store makes a new HTTP3 store.
func NewHttp3Store(params Http3Params) *Http3Store {
return &Http3Store{
name: params.Name,
NotFoundCache: &sync.Map{},
address: params.Address,
timeout: params.Timeout,
}
}
const nameHttp3 = "http3"
func Http3StoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg Http3Params
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
return NewHttp3Store(cfg), nil
}
func init() {
RegisterStore(nameHttp3, Http3StoreFactory)
}
func (h *Http3Store) Name() string { return nameHttp3 + "-" + h.name }
func (h *Http3Store) getClient() (*Http3Client, error) {
h.clientMu.RLock()
if h.client != nil {
client := h.client
h.clientMu.RUnlock()
return client, nil
}
h.clientMu.RUnlock()
h.clientMu.Lock()
defer h.clientMu.Unlock()
// Check again in case another goroutine created the client
if h.client != nil {
return h.client, nil
}
client, err := NewHttp3Client(h.address)
if err != nil {
return nil, err
}
h.client = client
return client, nil
}
// Has asks the peer if they have a hash
func (h *Http3Store) Has(hash string) (bool, error) {
c, err := h.getClient()
if err != nil {
return false, err
}
return c.HasBlob(hash)
}
// Get downloads the blob from the peer
func (h *Http3Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
if lastChecked, ok := h.NotFoundCache.Load(hash); ok {
if lastChecked.(time.Time).After(time.Now().Add(-5 * time.Minute)) {
return nil, shared.NewBlobTrace(time.Since(start), h.Name()+"-notfoundcache"), ErrBlobNotFound
}
}
c, err := h.getClient()
if err != nil && strings.Contains(err.Error(), "blob not found") {
h.NotFoundCache.Store(hash, time.Now())
}
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), h.Name()), err
}
return c.GetBlob(hash)
}
// Put is not supported
func (h *Http3Store) Put(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// PutSD is not supported
func (h *Http3Store) PutSD(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// Delete is not supported
func (h *Http3Store) Delete(hash string) error {
return errors.Err(shared.ErrNotImplemented)
}
// Shutdown shuts down the store gracefully
func (h *Http3Store) Shutdown() {
h.clientMu.Lock()
defer h.clientMu.Unlock()
if h.client != nil {
_ = h.client.Close()
h.client = nil
}
}

137
store/http3_client.go Normal file
View file

@ -0,0 +1,137 @@
package store
import (
"crypto/tls"
"crypto/x509"
"io"
"net/http"
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/quic-go/quic-go"
"github.com/quic-go/quic-go/http3"
)
// Http3Client is a client for HTTP3 blob store
type Http3Client struct {
conn *http.Client
roundTripper *http3.Transport
ServerAddr string
}
// NewHttp3Client creates a new HTTP3 client
func NewHttp3Client(address string) (*Http3Client, error) {
var qconf quic.Config
window500M := 500 * 1 << 20
qconf.MaxStreamReceiveWindow = uint64(window500M)
qconf.MaxConnectionReceiveWindow = uint64(window500M)
qconf.EnableDatagrams = true
qconf.HandshakeIdleTimeout = 4 * time.Second
qconf.MaxIdleTimeout = 20 * time.Second
pool, err := x509.SystemCertPool()
if err != nil {
return nil, err
}
roundTripper := &http3.Transport{
TLSClientConfig: &tls.Config{
RootCAs: pool,
InsecureSkipVerify: true,
},
QUICConfig: &qconf,
}
connection := &http.Client{
Transport: roundTripper,
}
return &Http3Client{
conn: connection,
roundTripper: roundTripper,
ServerAddr: address,
}, nil
}
// Close closes the client
func (c *Http3Client) Close() error {
return nil
}
// HasBlob checks if the peer has a blob
func (c *Http3Client) HasBlob(hash string) (bool, error) {
url := c.ServerAddr + "/blob?hash=" + hash
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return false, errors.Err(err)
}
res, err := c.conn.Do(req)
if err != nil {
return false, errors.Err(err)
}
defer func() { _ = res.Body.Close() }()
if res.StatusCode == http.StatusNotFound {
return false, nil
}
if res.StatusCode == http.StatusNoContent {
return true, nil
}
var body []byte
if res.Body != nil {
body, _ = io.ReadAll(res.Body)
}
return false, errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))
}
// GetBlob gets a blob from the peer
func (c *Http3Client) GetBlob(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
url := c.ServerAddr + "/blob?hash=" + hash
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "http3"), errors.Err(err)
}
res, err := c.conn.Do(req)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "http3"), errors.Err(err)
}
defer func() { _ = res.Body.Close() }()
viaHeader := res.Header.Get("Via")
var trace shared.BlobTrace
if viaHeader != "" {
parsedTrace, err := shared.Deserialize(viaHeader)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "http3"), err
}
trace = *parsedTrace
} else {
trace = shared.NewBlobTrace(0, "http3")
}
switch res.StatusCode {
case http.StatusNotFound:
return nil, trace.Stack(time.Since(start), "http3"), ErrBlobNotFound
case http.StatusOK:
buffer := getBuffer()
defer putBuffer(buffer)
if _, err := io.Copy(buffer, res.Body); err != nil {
return nil, trace.Stack(time.Since(start), "http3"), errors.Err(err)
}
blob := make([]byte, buffer.Len())
copy(blob, buffer.Bytes())
return blob, trace.Stack(time.Since(start), "http3"), nil
default:
body, _ := io.ReadAll(res.Body)
return nil, trace.Stack(time.Since(start), "http3"),
errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))
}
}

View file

@ -1,6 +1,7 @@
package store
import (
"strings"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
@ -8,25 +9,83 @@ import (
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/spf13/viper"
)
// ITTTStore performs an operation on this storage, if this fails, it attempts to run it on that
type ITTTStore struct {
this, that BlobStore
name string
}
type ITTTParams struct {
Name string `mapstructure:"name"`
This BlobStore `mapstructure:"this"`
That BlobStore `mapstructure:"that"`
}
type ITTTConfig struct {
Name string `mapstructure:"name"`
This *viper.Viper
That *viper.Viper
}
// NewITTTStore returns a new instance of the IF THIS THEN THAT store
func NewITTTStore(this, that BlobStore) *ITTTStore {
func NewITTTStore(params ITTTParams) *ITTTStore {
return &ITTTStore{
this: this,
that: that,
this: params.This,
that: params.That,
name: params.Name,
}
}
const nameIttt = "ittt"
func ITTTStoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg ITTTConfig
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
cfg.This = config.Sub("this")
cfg.That = config.Sub("that")
thisStoreType := strings.Split(cfg.This.AllKeys()[0], ".")[0]
thisStoreConfig := cfg.This.Sub(thisStoreType)
factory, ok := Factories[thisStoreType]
if !ok {
return nil, errors.Err("unknown store type %s", thisStoreType)
}
thisStore, err := factory(thisStoreConfig)
if err != nil {
return nil, errors.Err(err)
}
thatStoreType := strings.Split(cfg.That.AllKeys()[0], ".")[0]
thatStoreConfig := cfg.That.Sub(thatStoreType)
factory, ok = Factories[thatStoreType]
if !ok {
return nil, errors.Err("unknown store type %s", thatStoreType)
}
thatStore, err := factory(thatStoreConfig)
if err != nil {
return nil, errors.Err(err)
}
return NewITTTStore(ITTTParams{
Name: cfg.Name,
This: thisStore,
That: thatStore,
}), nil
}
func init() {
RegisterStore(nameIttt, ITTTStoreFactory)
}
// Name is the cache type name
func (c *ITTTStore) Name() string { return nameIttt }
func (c *ITTTStore) Name() string { return nameIttt + "-" + c.name }
// Has checks in this for a hash, if it fails it checks in that. It returns true if either store has it.
func (c *ITTTStore) Has(hash string) (bool, error) {
@ -42,7 +101,7 @@ func (c *ITTTStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
blob, trace, err := c.this.Get(hash)
if err == nil {
metrics.ThisHitCount.Inc()
metrics.ItttHitCount.With(metrics.ItttLabels(c.this.Name())).Inc()
return blob, trace.Stack(time.Since(start), c.Name()), err
}
@ -50,7 +109,7 @@ func (c *ITTTStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
if err != nil {
return nil, trace.Stack(time.Since(start), c.Name()), err
}
metrics.ThatHitCount.Inc()
metrics.ItttHitCount.With(metrics.ItttLabels(c.that.Name())).Inc()
return blob, trace.Stack(time.Since(start), c.Name()), nil
}
@ -70,4 +129,7 @@ func (c *ITTTStore) Delete(hash string) error {
}
// Shutdown shuts down the store gracefully
func (c *ITTTStore) Shutdown() {}
func (c *ITTTStore) Shutdown() {
c.this.Shutdown()
c.that.Shutdown()
}
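The factory above resolves nested viper sub-configs for this/that; wiring the same composition directly in Go looks like the following sketch (not part of the diff; it assumes the exported constructors introduced in this PR and uses two in-memory stores for illustration):

package main

import (
	"fmt"

	"github.com/lbryio/reflector.go/store"
)

func main() {
	hot := store.NewMemStore(store.MemParams{Name: "hot"})
	cold := store.NewMemStore(store.MemParams{Name: "cold"})

	// Seed only the fallback store so the first Get misses "this" and falls through to "that".
	if err := cold.Put("deadbeef", []byte("blob contents")); err != nil {
		panic(err)
	}

	ittt := store.NewITTTStore(store.ITTTParams{Name: "example", This: hot, That: cold})
	defer ittt.Shutdown()

	blob, _, err := ittt.Get("deadbeef")
	if err != nil {
		panic(err)
	}
	fmt.Printf("got %d bytes via %s\n", len(blob), ittt.Name())
}

In the YAML-driven path, ITTTStoreFactory builds the same structure from the "this" and "that" sub-keys of the config.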

View file

@ -8,25 +8,45 @@ import (
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/spf13/viper"
)
// MemStore is an in memory only blob store with no persistence.
type MemStore struct {
blobs map[string]stream.Blob
mu *sync.RWMutex
name string
}
func NewMemStore() *MemStore {
type MemParams struct {
Name string `mapstructure:"name"`
}
func NewMemStore(params MemParams) *MemStore {
return &MemStore{
blobs: make(map[string]stream.Blob),
mu: &sync.RWMutex{},
name: params.Name,
}
}
const nameMem = "mem"
func MemStoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg MemParams
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
return NewMemStore(cfg), nil
}
func init() {
RegisterStore(nameMem, MemStoreFactory)
}
// Name is the cache type name
func (m *MemStore) Name() string { return nameMem }
func (m *MemStore) Name() string { return nameMem + "-" + m.name }
// Has returns T/F if the blob is currently stored. It will never error.
func (m *MemStore) Has(hash string) (bool, error) {

View file

@ -8,7 +8,7 @@ import (
)
func TestMemStore_Put(t *testing.T) {
s := NewMemStore()
s := NewMemStore(MemParams{Name: "test"})
blob := []byte("abcdefg")
err := s.Put("abc", blob)
if err != nil {
@ -17,7 +17,7 @@ func TestMemStore_Put(t *testing.T) {
}
func TestMemStore_Get(t *testing.T) {
s := NewMemStore()
s := NewMemStore(MemParams{Name: "test"})
hash := "abc"
blob := []byte("abcdefg")
err := s.Put(hash, blob)

204
store/multiwriter.go Normal file
View file

@ -0,0 +1,204 @@
package store
import (
"strings"
"sync"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/lbryio/reflector.go/shared"
"github.com/spf13/viper"
)
// MultiWriterStore writes to multiple destination stores
type MultiWriterStore struct {
name string
destinations []BlobStore
}
type MultiWriterParams struct {
Name string
Destinations []BlobStore
}
type MultiWriterConfig struct {
Name string `mapstructure:"name"`
One viper.Viper
Two viper.Viper
Three viper.Viper
}
// NewMultiWriterStore returns a new instance of the MultiWriter store
func NewMultiWriterStore(params MultiWriterParams) *MultiWriterStore {
return &MultiWriterStore{
name: params.Name,
destinations: params.Destinations,
}
}
const nameMultiWriter = "multiwriter"
func MultiWriterStoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg MultiWriterConfig
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
var destinations []BlobStore
one := config.Sub("one")
two := config.Sub("two")
//three := config.Sub("three")
storeTypeOne := strings.Split(one.AllKeys()[0], ".")[0]
storeTypeTwo := strings.Split(two.AllKeys()[0], ".")[0]
//storeTypeThree := strings.Split(three.AllKeys()[0], ".")[0]
storeCfgOne := one.Sub(storeTypeOne)
storeCfgTwo := two.Sub(storeTypeTwo)
//storeCfgThree := config.Sub(storeTypeThree)
factoryOne, ok := Factories[storeTypeOne]
if !ok {
return nil, errors.Err("unknown store type %s", storeTypeOne)
}
factoryTwo, ok := Factories[storeTypeTwo]
if !ok {
return nil, errors.Err("unknown store type %s", storeTypeTwo)
}
//factoryThree, ok := Factories[storeTypeThree]
//if !ok {
// return nil, errors.Err("unknown store type %s", storeTypeThree)
//}
store1, err := factoryOne(storeCfgOne)
if err != nil {
return nil, errors.Err(err)
}
store2, err := factoryTwo(storeCfgTwo)
if err != nil {
return nil, errors.Err(err)
}
//store3, err := factoryThree(storeCfgThree)
//if err != nil {
// return nil, errors.Err(err)
//}
destinations = append(destinations, store1, store2)
return NewMultiWriterStore(MultiWriterParams{
Name: cfg.Name,
Destinations: destinations,
}), nil
}
func init() {
RegisterStore(nameMultiWriter, MultiWriterStoreFactory)
}
// Name returns the store name
func (m *MultiWriterStore) Name() string { return nameMultiWriter + "-" + m.name }
// Has is not supported by MultiWriter
func (m *MultiWriterStore) Has(hash string) (bool, error) {
return false, errors.Err(shared.ErrNotImplemented)
}
// Get is not supported by MultiWriter
func (m *MultiWriterStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
return nil, shared.BlobTrace{}, errors.Err(shared.ErrNotImplemented)
}
// Put writes the blob to all destination stores
func (m *MultiWriterStore) Put(hash string, blob stream.Blob) error {
var wg sync.WaitGroup
errChan := make(chan error, len(m.destinations))
for _, dest := range m.destinations {
wg.Add(1)
go func(d BlobStore) {
defer wg.Done()
if err := d.Put(hash, blob); err != nil {
errChan <- errors.Err("failed to write to %s: %v", d.Name(), err)
}
}(dest)
}
wg.Wait()
close(errChan)
var errs []string
for err := range errChan {
errs = append(errs, err.Error())
}
if len(errs) > 0 {
return errors.Err("failed to write to some destinations: %s", strings.Join(errs, "; "))
}
return nil
}
// PutSD writes the SD blob to all destination stores
func (m *MultiWriterStore) PutSD(hash string, blob stream.Blob) error {
var wg sync.WaitGroup
errChan := make(chan error, len(m.destinations))
for _, dest := range m.destinations {
wg.Add(1)
go func(d BlobStore) {
defer wg.Done()
if err := d.PutSD(hash, blob); err != nil {
errChan <- errors.Err("failed to write SD to %s: %v", d.Name(), err)
}
}(dest)
}
wg.Wait()
close(errChan)
var errs []string
for err := range errChan {
errs = append(errs, err.Error())
}
if len(errs) > 0 {
return errors.Err("failed to write SD to some destinations: %s", strings.Join(errs, "; "))
}
return nil
}
// Delete deletes the blob from all destination stores
func (m *MultiWriterStore) Delete(hash string) error {
var wg sync.WaitGroup
errChan := make(chan error, len(m.destinations))
for _, dest := range m.destinations {
wg.Add(1)
go func(d BlobStore) {
defer wg.Done()
if err := d.Delete(hash); err != nil {
errChan <- errors.Err("failed to delete from %s: %v", d.Name(), err)
}
}(dest)
}
wg.Wait()
close(errChan)
var errs []string
for err := range errChan {
errs = append(errs, err.Error())
}
if len(errs) > 0 {
return errors.Err("failed to delete from some destinations: %s", strings.Join(errs, "; "))
}
return nil
}
// Shutdown shuts down all destination stores gracefully
func (m *MultiWriterStore) Shutdown() {
for _, dest := range m.destinations {
dest.Shutdown()
}
}
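A minimal fan-out sketch (not part of the diff), assuming the exported constructors above; Put is sent to every destination concurrently and per-store failures are aggregated into a single error:

package main

import (
	"log"

	"github.com/lbryio/reflector.go/store"
)

func main() {
	a := store.NewMemStore(store.MemParams{Name: "copy-a"})
	b := store.NewMemStore(store.MemParams{Name: "copy-b"})

	mw := store.NewMultiWriterStore(store.MultiWriterParams{
		Name:         "example",
		Destinations: []store.BlobStore{a, b},
	})
	defer mw.Shutdown()

	// Writes go to both destinations; reads (Has/Get) are intentionally unsupported.
	if err := mw.Put("deadbeef", []byte("blob contents")); err != nil {
		log.Fatal(err)
	}
}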

View file

@ -5,15 +5,38 @@ import (
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/spf13/viper"
)
// NoopStore is a store that does nothing
type NoopStore struct{}
type NoopStore struct {
name string
}
func NewNoopStore(name string) *NoopStore {
return &NoopStore{name: name}
}
const nameNoop = "noop"
func (n *NoopStore) Name() string { return nameNoop }
func NoopStoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg struct {
Name string `mapstructure:"name"`
}
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
return NewNoopStore(cfg.Name), nil
}
func init() {
RegisterStore(nameNoop, NoopStoreFactory)
}
func (n *NoopStore) Name() string { return nameNoop + "-" + n.name }
func (n *NoopStore) Has(_ string) (bool, error) { return false, nil }
func (n *NoopStore) Get(_ string) (stream.Blob, shared.BlobTrace, error) {
return nil, shared.NewBlobTrace(time.Since(time.Now()), n.Name()), nil
@ -21,4 +44,4 @@ func (n *NoopStore) Get(_ string) (stream.Blob, shared.BlobTrace, error) {
func (n *NoopStore) Put(_ string, _ stream.Blob) error { return nil }
func (n *NoopStore) PutSD(_ string, _ stream.Blob) error { return nil }
func (n *NoopStore) Delete(_ string) error { return nil }
func (n *NoopStore) Shutdown() { return }
func (n *NoopStore) Shutdown() {}

99
store/peer.go Normal file
View file

@ -0,0 +1,99 @@
package store
import (
"strings"
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/spf13/viper"
)
// PeerStore is a blob store that gets blobs from a peer.
// It satisfies the BlobStore interface but cannot put or delete blobs.
type PeerStore struct {
opts PeerParams
name string
}
// PeerParams holds the options for a new PeerStore.
type PeerParams struct {
Name string `mapstructure:"name"`
Address string `mapstructure:"address"`
Timeout time.Duration `mapstructure:"timeout"`
}
// NewPeerStore makes a new peer store.
func NewPeerStore(params PeerParams) *PeerStore {
return &PeerStore{opts: params, name: params.Name}
}
const namePeer = "peer"
func PeerStoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg PeerParams
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
return NewPeerStore(cfg), nil
}
func init() {
RegisterStore(namePeer, PeerStoreFactory)
}
func (p *PeerStore) Name() string { return namePeer + "-" + p.name }
func (p *PeerStore) getClient() (*PeerClient, error) {
c := &PeerClient{Timeout: p.opts.Timeout}
err := c.Connect(p.opts.Address)
return c, errors.Prefix("connection error", err)
}
// Has asks the peer if they have a hash
func (p *PeerStore) Has(hash string) (bool, error) {
c, err := p.getClient()
if err != nil {
return false, err
}
defer func() { _ = c.Close() }()
return c.HasBlob(hash)
}
// Get downloads the blob from the peer
func (p *PeerStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
c, err := p.getClient()
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), p.Name()), err
}
defer func() { _ = c.Close() }()
blob, trace, err := c.GetBlob(hash)
if err != nil && strings.Contains(err.Error(), "blob not found") {
return nil, trace, ErrBlobNotFound
}
return blob, trace, err
}
// Put is not supported
func (p *PeerStore) Put(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// PutSD is not supported
func (p *PeerStore) PutSD(hash string, blob stream.Blob) error {
return errors.Err(shared.ErrNotImplemented)
}
// Delete is not supported
func (p *PeerStore) Delete(hash string) error {
return errors.Err(shared.ErrNotImplemented)
}
// Shutdown is a no-op for the peer store
func (p *PeerStore) Shutdown() {
}
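A usage sketch for the read-only peer store (not part of the diff; the address is a placeholder):

package main

import (
	"fmt"
	"time"

	"github.com/lbryio/reflector.go/store"
)

func main() {
	p := store.NewPeerStore(store.PeerParams{
		Name:    "example",
		Address: "127.0.0.1:5567", // placeholder peer address
		Timeout: 30 * time.Second,
	})
	defer p.Shutdown()

	has, err := p.Has("deadbeef")
	if err != nil {
		fmt.Println("peer error:", err)
		return
	}
	fmt.Println("peer has blob:", has)
}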

95
store/peer_client.go Normal file
View file

@ -0,0 +1,95 @@
package store
import (
"bufio"
"bytes"
"encoding/binary"
"io"
"net"
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/stream"
)
// PeerClient is a client for peer blob store
type PeerClient struct {
conn net.Conn
Timeout time.Duration
}
// Connect connects to a peer
func (c *PeerClient) Connect(address string) error {
var err error
c.conn, err = net.DialTimeout("tcp", address, c.Timeout)
return err
}
// Close closes the connection
func (c *PeerClient) Close() error {
if c.conn != nil {
return c.conn.Close()
}
return nil
}
// HasBlob checks if the peer has a blob
func (c *PeerClient) HasBlob(hash string) (bool, error) {
err := c.writeRequest("has", hash)
if err != nil {
return false, err
}
response, err := c.readResponse()
if err != nil {
return false, err
}
return response == "yes", nil
}
// GetBlob gets a blob from the peer
func (c *PeerClient) GetBlob(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
err := c.writeRequest("get", hash)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "peer"), err
}
response, err := c.readResponse()
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "peer"), err
}
if response == "no" {
return nil, shared.NewBlobTrace(time.Since(start), "peer"), ErrBlobNotFound
}
size, err := binary.ReadVarint(bufio.NewReader(bytes.NewReader([]byte(response))))
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "peer"), err
}
blob := make([]byte, size)
_, err = io.ReadFull(c.conn, blob)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), "peer"), err
}
return blob, shared.NewBlobTrace(time.Since(start), "peer"), nil
}
func (c *PeerClient) writeRequest(cmd, hash string) error {
_, err := c.conn.Write([]byte(cmd + " " + hash + "\n"))
return err
}
func (c *PeerClient) readResponse() (string, error) {
reader := bufio.NewReader(c.conn)
response, err := reader.ReadString('\n')
if err != nil {
return "", err
}
return response[:len(response)-1], nil
}
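PeerStore above is a thin wrapper around this client; driving it directly looks like this sketch (not part of the diff; the address is a placeholder):

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/lbryio/reflector.go/store"
)

func main() {
	c := &store.PeerClient{Timeout: 30 * time.Second}
	if err := c.Connect("127.0.0.1:5567"); err != nil { // placeholder address
		log.Fatal(err)
	}
	defer func() { _ = c.Close() }()

	blob, _, err := c.GetBlob("deadbeef")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("downloaded %d bytes\n", len(blob))
}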

146
store/proxied_s3.go Normal file
View file

@ -0,0 +1,146 @@
package store
import (
"strings"
"time"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/spf13/viper"
)
// ProxiedS3Store writes to an S3 store and reads from any BlobStore (usually an ITTTStore of HttpStore endpoints).
type ProxiedS3Store struct {
readerStore BlobStore
writerStore BlobStore
name string
}
func (c *ProxiedS3Store) MissingBlobsForKnownStream(s string) ([]string, error) {
if bc, ok := c.writerStore.(NeededBlobChecker); ok {
return bc.MissingBlobsForKnownStream(s)
}
return nil, errors.Err("writer does not implement neededBlobChecker")
}
func (c *ProxiedS3Store) Block(hash string) error {
if bl, ok := c.writerStore.(Blocklister); ok {
return bl.Block(hash)
}
return errors.Err("writer does not implement Blocklister")
}
func (c *ProxiedS3Store) Wants(hash string) (bool, error) {
if bl, ok := c.writerStore.(Blocklister); ok {
return bl.Wants(hash)
}
return true, errors.Err("writer does not implement Blocklister")
}
type ProxiedS3Params struct {
Name string `mapstructure:"name"`
Reader BlobStore `mapstructure:"reader"`
Writer BlobStore `mapstructure:"writer"`
}
type ProxiedS3Config struct {
Name string `mapstructure:"name"`
Reader *viper.Viper
Writer *viper.Viper
}
// NewProxiedS3Store returns an initialized ProxiedS3Store store pointer.
// NOTE: It panics if either argument is nil.
func NewProxiedS3Store(params ProxiedS3Params) *ProxiedS3Store {
if params.Reader == nil || params.Writer == nil {
panic("both stores must be set")
}
return &ProxiedS3Store{
readerStore: params.Reader,
writerStore: params.Writer,
name: params.Name,
}
}
const nameProxiedS3 = "proxied-s3"
// Name is the cache type name
func (c *ProxiedS3Store) Name() string { return nameProxiedS3 + "-" + c.name }
// Has checks if the hash is in the store.
func (c *ProxiedS3Store) Has(hash string) (bool, error) {
return c.writerStore.Has(hash)
}
// Get gets the blob from the reader store.
func (c *ProxiedS3Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
blob, trace, err := c.readerStore.Get(hash)
return blob, trace.Stack(time.Since(start), c.Name()), err
}
// Put stores the blob on S3
func (c *ProxiedS3Store) Put(hash string, blob stream.Blob) error {
return c.writerStore.Put(hash, blob)
}
// PutSD stores the sd blob on S3
func (c *ProxiedS3Store) PutSD(hash string, blob stream.Blob) error {
return c.writerStore.PutSD(hash, blob)
}
// Delete deletes the blob from S3
func (c *ProxiedS3Store) Delete(hash string) error {
return c.writerStore.Delete(hash)
}
// Shutdown shuts down the store gracefully
func (c *ProxiedS3Store) Shutdown() {
c.writerStore.Shutdown()
c.readerStore.Shutdown()
}
func ProxiedS3StoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg ProxiedS3Config
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
cfg.Reader = config.Sub("reader")
cfg.Writer = config.Sub("writer")
readerStoreType := strings.Split(cfg.Reader.AllKeys()[0], ".")[0]
readerStoreConfig := cfg.Reader.Sub(readerStoreType)
factory, ok := Factories[readerStoreType]
if !ok {
return nil, errors.Err("unknown store type %s", readerStoreType)
}
readerStore, err := factory(readerStoreConfig)
if err != nil {
return nil, errors.Err(err)
}
writerStoreType := strings.Split(cfg.Writer.AllKeys()[0], ".")[0]
writerStoreConfig := cfg.Writer.Sub(writerStoreType)
factory, ok = Factories[writerStoreType]
if !ok {
return nil, errors.Err("unknown store type %s", writerStoreType)
}
writerStore, err := factory(writerStoreConfig)
if err != nil {
return nil, errors.Err(err)
}
return NewProxiedS3Store(ProxiedS3Params{
Name: cfg.Name,
Reader: readerStore,
Writer: writerStore,
}), nil
}
func init() {
RegisterStore(nameProxiedS3, ProxiedS3StoreFactory)
}
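A construction sketch (not part of the diff) using the exported params above; reads are served by one store while writes, deletes, and blocklist checks go to the other. In-memory stores stand in here for the usual HTTP readers and S3 writer:

package main

import (
	"log"

	"github.com/lbryio/reflector.go/store"
)

func main() {
	reader := store.NewMemStore(store.MemParams{Name: "reader"})
	writer := store.NewMemStore(store.MemParams{Name: "writer"})

	// NewProxiedS3Store panics if either argument is nil.
	ps3 := store.NewProxiedS3Store(store.ProxiedS3Params{
		Name:   "example",
		Reader: reader,
		Writer: writer,
	})
	defer ps3.Shutdown()

	if err := ps3.Put("deadbeef", []byte("blob contents")); err != nil { // stored via the writer
		log.Fatal(err)
	}
	if _, _, err := ps3.Get("deadbeef"); err != nil { // served by the reader only
		log.Println("reader miss:", err)
	}
}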

View file

@ -2,7 +2,7 @@ package store
import (
"bytes"
"net/http"
"path"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
@ -18,36 +18,63 @@ import (
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
// S3Store is an S3 store
type S3Store struct {
awsID string
awsSecret string
region string
bucket string
endpoint string
awsID string
awsSecret string
region string
bucket string
endpoint string
name string
prefixLength int
session *session.Session
}
session *session.Session
type S3Params struct {
Name string `mapstructure:"name"`
AwsID string `mapstructure:"aws_id"`
AwsSecret string `mapstructure:"aws_secret"`
Region string `mapstructure:"region"`
Bucket string `mapstructure:"bucket"`
Endpoint string `mapstructure:"endpoint"`
ShardingSize int `mapstructure:"sharding_size"`
}
// NewS3Store returns an initialized S3 store pointer.
func NewS3Store(awsID, awsSecret, region, bucket, endpoint string) *S3Store {
func NewS3Store(params S3Params) *S3Store {
return &S3Store{
awsID: awsID,
awsSecret: awsSecret,
region: region,
bucket: bucket,
endpoint: endpoint,
awsID: params.AwsID,
awsSecret: params.AwsSecret,
region: params.Region,
bucket: params.Bucket,
endpoint: params.Endpoint,
name: params.Name,
prefixLength: params.ShardingSize,
}
}
const nameS3 = "s3"
// Name is the cache type name
func (s *S3Store) Name() string { return nameS3 }
func S3StoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg S3Params
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
return NewS3Store(cfg), nil
}
// Has returns T/F or Error ( from S3 ) if the store contains the blob.
func init() {
RegisterStore(nameS3, S3StoreFactory)
}
// Name is the cache type name
func (s *S3Store) Name() string { return nameS3 + "-" + s.name }
// Has returns T/F or Error (from S3) if the store contains the blob.
func (s *S3Store) Has(hash string) (bool, error) {
err := s.initOnce()
if err != nil {
@ -56,13 +83,13 @@ func (s *S3Store) Has(hash string) (bool, error) {
_, err = s3.New(s.session).HeadObject(&s3.HeadObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(hash),
Key: aws.String(s.shardedPath(hash)),
})
if err != nil {
if reqFail, ok := err.(s3.RequestFailure); ok && reqFail.StatusCode() == http.StatusNotFound {
if aerr, ok := err.(awserr.Error); ok && aerr.Code() == s3.ErrCodeNoSuchKey {
return false, nil
}
return false, err
return false, errors.Err(err)
}
return true, nil
@ -79,13 +106,13 @@ func (s *S3Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
log.Debugf("Getting %s from S3", hash[:8])
defer func(t time.Time) {
log.Debugf("Getting %s from S3 took %s", hash[:8], time.Since(t).String())
log.Debugf("Getting %s from %s took %s", hash[:8], s.Name(), time.Since(t).String())
}(start)
buf := &aws.WriteAtBuffer{}
_, err = s3manager.NewDownloader(s.session).Download(buf, &s3.GetObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(hash),
Key: aws.String(s.shardedPath(hash)),
})
if err != nil {
if aerr, ok := err.(awserr.Error); ok {
@ -96,7 +123,7 @@ func (s *S3Store) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
return nil, shared.NewBlobTrace(time.Since(start), s.Name()), errors.Err(ErrBlobNotFound)
}
}
return buf.Bytes(), shared.NewBlobTrace(time.Since(start), s.Name()), err
return nil, shared.NewBlobTrace(time.Since(start), s.Name()), errors.Err(err)
}
return buf.Bytes(), shared.NewBlobTrace(time.Since(start), s.Name()), nil
@ -116,14 +143,16 @@ func (s *S3Store) Put(hash string, blob stream.Blob) error {
_, err = s3manager.NewUploader(s.session).Upload(&s3manager.UploadInput{
Bucket: aws.String(s.bucket),
Key: aws.String(hash),
Key: aws.String(s.shardedPath(hash)),
Body: bytes.NewBuffer(blob),
ACL: aws.String("public-read"),
//StorageClass: aws.String(s3.StorageClassIntelligentTiering),
})
if err != nil {
return errors.Err(err)
}
metrics.MtrOutBytesReflector.Add(float64(blob.Size()))
return err
return nil
}
// PutSD stores the sd blob on S3 or errors if S3 connection errors.
@ -142,10 +171,10 @@ func (s *S3Store) Delete(hash string) error {
_, err = s3.New(s.session).DeleteObject(&s3.DeleteObjectInput{
Bucket: aws.String(s.bucket),
Key: aws.String(hash),
Key: aws.String(s.shardedPath(hash)),
})
return err
return errors.Err(err)
}
func (s *S3Store) initOnce() error {
@ -154,19 +183,26 @@ func (s *S3Store) initOnce() error {
}
sess, err := session.NewSession(&aws.Config{
Credentials: credentials.NewStaticCredentials(s.awsID, s.awsSecret, ""),
Region: aws.String(s.region),
Endpoint: aws.String(s.endpoint),
Credentials: credentials.NewStaticCredentials(s.awsID, s.awsSecret, ""),
Region: aws.String(s.region),
Endpoint: aws.String(s.endpoint),
S3ForcePathStyle: aws.Bool(true),
})
if err != nil {
return err
return errors.Err(err)
}
s.session = sess
return nil
}
func (s *S3Store) shardedPath(hash string) string {
if s.prefixLength <= 0 || len(hash) < s.prefixLength {
return hash
}
return path.Join(hash[:s.prefixLength], hash)
}
// Shutdown shuts down the store gracefully
func (s *S3Store) Shutdown() {
return
}
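A configuration sketch (not part of the diff; credentials, endpoint, and bucket are placeholders) showing the new sharded key layout: with ShardingSize set to 2, shardedPath stores a blob whose hash starts with "ab" under the key "ab/<hash>" instead of the bare hash:

package main

import (
	"log"

	"github.com/lbryio/reflector.go/store"
)

func main() {
	s3 := store.NewS3Store(store.S3Params{
		Name:         "example",
		AwsID:        "EXAMPLEKEY",             // placeholder credential
		AwsSecret:    "examplesecret",          // placeholder credential
		Region:       "us-east-1",
		Bucket:       "blobs",                  // placeholder bucket
		Endpoint:     "https://s3.example.com", // placeholder endpoint
		ShardingSize: 2,
	})
	defer s3.Shutdown()

	// With ShardingSize: 2 the object key becomes "ab/abcdef0123456789".
	if err := s3.Put("abcdef0123456789", []byte("blob contents")); err != nil {
		log.Println("upload failed (expected with placeholder credentials):", err)
	}
}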

View file

@ -1,6 +1,7 @@
package store
import (
"strings"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
@ -9,6 +10,7 @@ import (
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
"github.com/spf13/viper"
"golang.org/x/sync/singleflight"
)
@ -27,6 +29,38 @@ type singleflightStore struct {
sf *singleflight.Group
}
type SingleFlightConfig struct {
Component string `mapstructure:"component"`
Store *viper.Viper
}
func SingleFlightStoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg SingleFlightConfig
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
cfg.Store = config.Sub("store")
storeType := strings.Split(cfg.Store.AllKeys()[0], ".")[0]
storeConfig := cfg.Store.Sub(storeType)
factory, ok := Factories[storeType]
if !ok {
return nil, errors.Err("unknown store type %s", storeType)
}
underlyingStore, err := factory(storeConfig)
if err != nil {
return nil, errors.Err(err)
}
return WithSingleFlight(cfg.Component, underlyingStore), nil
}
func init() {
RegisterStore("singleflight", SingleFlightStoreFactory)
}
func (s *singleflightStore) Name() string {
return "sf_" + s.BlobStore.Name()
}
@ -124,5 +158,4 @@ func (s *singleflightStore) putter(hash string, blob stream.Blob) func() (interf
// Shutdown shuts down the store gracefully
func (s *singleflightStore) Shutdown() {
s.BlobStore.Shutdown()
return
}
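A sketch (not part of the diff) of the wrapper in use, assuming WithSingleFlight keeps the signature the factory above relies on; concurrent Gets for the same hash collapse into a single call to the underlying store:

package main

import (
	"sync"

	"github.com/lbryio/reflector.go/store"
)

func main() {
	origin := store.NewMemStore(store.MemParams{Name: "origin"})
	_ = origin.Put("deadbeef", []byte("blob contents"))

	sf := store.WithSingleFlight("example-component", origin)
	defer sf.Shutdown()

	// Five concurrent requests for the same hash are deduplicated by the singleflight group.
	var wg sync.WaitGroup
	for i := 0; i < 5; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			_, _, _ = sf.Get("deadbeef")
		}()
	}
	wg.Wait()
}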

View file

@ -2,6 +2,7 @@ package store
import (
"github.com/lbryio/reflector.go/shared"
"github.com/spf13/viper"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
@ -33,11 +34,23 @@ type Blocklister interface {
Wants(hash string) (bool, error)
}
// NeededBlobChecker can check which blobs from a known stream are not uploaded yet
type NeededBlobChecker interface {
MissingBlobsForKnownStream(string) ([]string, error)
}
// lister is a store that can list cached blobs. This is helpful when an overlay
// cache needs to track blob existence.
type lister interface {
list() ([]string, error)
}
//ErrBlobNotFound is a standard error when a blob is not found in the store.
// ErrBlobNotFound is a standard error when a blob is not found in the store.
var ErrBlobNotFound = errors.Base("blob not found")
var Factories = make(map[string]Factory)
func RegisterStore(name string, factory Factory) {
Factories[name] = factory
}
type Factory func(config *viper.Viper) (BlobStore, error)
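Every store file in this PR registers itself through this map in an init() function, and the composite factories (ittt, multiwriter, proxied-s3, singleflight) look nested store types up in Factories. A hypothetical third-party store would plug in the same way; in the sketch below the "mycache" name and the reuse of MemParams are illustrative only and not part of this PR:

package store

import "github.com/spf13/viper"

// Hypothetical example: register a custom factory under the name "mycache".
func init() {
	RegisterStore("mycache", func(config *viper.Viper) (BlobStore, error) {
		var cfg MemParams // reusing MemParams just for the sketch
		if err := config.Unmarshal(&cfg); err != nil {
			return nil, err
		}
		return NewMemStore(cfg), nil
	})
}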

203
store/upstream.go Normal file
View file

@ -0,0 +1,203 @@
package store
import (
"bytes"
"context"
"io"
"net"
"net/http"
"sync"
"time"
"github.com/lbryio/reflector.go/internal/metrics"
"github.com/lbryio/reflector.go/shared"
"github.com/lbryio/lbry.go/v2/extras/errors"
"github.com/lbryio/lbry.go/v2/stream"
log "github.com/sirupsen/logrus"
"github.com/spf13/viper"
)
// UpstreamStore is a read-only store that fetches blobs from another reflector over HTTP
type UpstreamStore struct {
upstream string
httpClient *http.Client
edgeToken string
name string
}
type UpstreamParams struct {
Name string `mapstructure:"name"`
Upstream string `mapstructure:"upstream"`
EdgeToken string `mapstructure:"edge_token"`
}
func NewUpstreamStore(params UpstreamParams) *UpstreamStore {
return &UpstreamStore{
upstream: params.Upstream,
httpClient: getClient(),
edgeToken: params.EdgeToken,
name: params.Name,
}
}
const nameUpstream = "upstream"
func UpstreamStoreFactory(config *viper.Viper) (BlobStore, error) {
var cfg UpstreamParams
err := config.Unmarshal(&cfg)
if err != nil {
return nil, errors.Err(err)
}
return NewUpstreamStore(cfg), nil
}
func init() {
RegisterStore(nameUpstream, UpstreamStoreFactory)
}
func (n *UpstreamStore) Name() string { return nameUpstream + "-" + n.name }
func (n *UpstreamStore) Has(hash string) (bool, error) {
url := n.upstream + "/blob?hash=" + hash
req, err := http.NewRequest("HEAD", url, nil)
if err != nil {
return false, errors.Err(err)
}
res, err := n.httpClient.Do(req)
if err != nil {
return false, errors.Err(err)
}
defer func() { _ = res.Body.Close() }()
if res.StatusCode == http.StatusNotFound {
return false, nil
}
if res.StatusCode == http.StatusNoContent {
return true, nil
}
var body []byte
if res.Body != nil {
body, _ = io.ReadAll(res.Body)
}
return false, errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))
}
func (n *UpstreamStore) Get(hash string) (stream.Blob, shared.BlobTrace, error) {
start := time.Now()
url := n.upstream + "/blob?hash=" + hash
if n.edgeToken != "" {
url += "&edge_token=" + n.edgeToken
}
req, err := http.NewRequest(http.MethodGet, url, nil)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), n.Name()), errors.Err(err)
}
res, err := n.httpClient.Do(req)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), n.Name()), errors.Err(err)
}
defer func(Body io.ReadCloser) {
err := Body.Close()
if err != nil {
log.Errorf("Error closing response body in HTTP-GET: %s", err.Error())
}
}(res.Body)
viaHeader := res.Header.Get("Via")
var trace shared.BlobTrace
if viaHeader != "" {
parsedTrace, err := shared.Deserialize(viaHeader)
if err != nil {
return nil, shared.NewBlobTrace(time.Since(start), n.Name()), err
}
trace = *parsedTrace
} else {
trace = shared.NewBlobTrace(0, n.Name())
}
switch res.StatusCode {
case http.StatusNotFound:
return nil, trace.Stack(time.Since(start), n.Name()), ErrBlobNotFound
case http.StatusOK:
buffer := getBuffer()
defer putBuffer(buffer)
if _, err := io.Copy(buffer, res.Body); err != nil {
return nil, trace.Stack(time.Since(start), n.Name()), errors.Err(err)
}
blob := make([]byte, buffer.Len())
copy(blob, buffer.Bytes())
metrics.MtrInBytesUpstream.Add(float64(len(blob)))
return blob, trace.Stack(time.Since(start), n.Name()), nil
default:
body, _ := io.ReadAll(res.Body)
log.Warnf("Got status code %d (%s)", res.StatusCode, string(body))
return nil, trace.Stack(time.Since(start), n.Name()),
errors.Err("upstream error. Status code: %d (%s)", res.StatusCode, string(body))
}
}
func (n *UpstreamStore) Put(string, stream.Blob) error {
return shared.ErrNotImplemented
}
func (n *UpstreamStore) PutSD(string, stream.Blob) error {
return shared.ErrNotImplemented
}
func (n *UpstreamStore) Delete(string) error {
return shared.ErrNotImplemented
}
func (n *UpstreamStore) Shutdown() {}
// buffer pool to reduce GC
// https://www.captaincodeman.com/2017/06/02/golang-buffer-pool-gotcha
var buffers = sync.Pool{
// New is called when a new instance is needed
New: func() interface{} {
buf := make([]byte, 0, stream.MaxBlobSize)
return bytes.NewBuffer(buf)
},
}
// getBuffer fetches a buffer from the pool
func getBuffer() *bytes.Buffer {
return buffers.Get().(*bytes.Buffer)
}
// putBuffer returns a buffer to the pool
func putBuffer(buf *bytes.Buffer) {
buf.Reset()
buffers.Put(buf)
}
func dialContext(ctx context.Context, network, address string) (net.Conn, error) {
dialer := &net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}
return dialer.DialContext(ctx, network, address)
}
// getClient returns an http client tuned for blobs of about 2MB in size (most of our blobs)
func getClient() *http.Client {
// Customize the Transport to have a larger connection pool
defaultTransport := &http.Transport{
DialContext: dialContext,
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
DisableCompression: true,
MaxIdleConnsPerHost: 100,
ReadBufferSize: stream.MaxBlobSize + 1024*10, // add a few extra KB to make sure the extra information also fits
}
return &http.Client{Transport: defaultTransport}
}
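A usage sketch (not part of the diff; the upstream URL is a placeholder) for fetching a blob over HTTP from another reflector, with the optional edge token appended as the edge_token query parameter:

package main

import (
	"fmt"

	"github.com/lbryio/reflector.go/store"
)

func main() {
	up := store.NewUpstreamStore(store.UpstreamParams{
		Name:      "example",
		Upstream:  "http://blobs.example.com:5569", // placeholder upstream address
		EdgeToken: "",                              // optional; sent as edge_token when set
	})
	defer up.Shutdown()

	blob, _, err := up.Get("deadbeef")
	if err != nil {
		fmt.Println("upstream error:", err)
		return
	}
	fmt.Printf("got %d bytes\n", len(blob))
}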