mirror of
				https://github.com/go-gitea/gitea.git
				synced 2025-10-31 02:46:04 +01:00 
			
		
		
		
	[Vendor] Update go-redis to v8.5.0 (#13749)
* Update go-redis to v8.4.0 * github.com/go-redis/redis/v8 v8.4.0 -> v8.5.0 * Apply suggestions from code review Co-authored-by: zeripath <art27@cantab.net> * TODO * Use the Queue termination channel as the default context for pushes Signed-off-by: Andrew Thornton <art27@cantab.net> * missed one Signed-off-by: Andrew Thornton <art27@cantab.net> Co-authored-by: zeripath <art27@cantab.net>
This commit is contained in:
		
							
								
								
									
										3
									
								
								vendor/github.com/go-redis/redis/v8/.gitignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										3
									
								
								vendor/github.com/go-redis/redis/v8/.gitignore
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,3 @@ | ||||
| *.rdb | ||||
| testdata/*/ | ||||
| .idea/ | ||||
							
								
								
									
										24
									
								
								vendor/github.com/go-redis/redis/v8/.golangci.yml
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								vendor/github.com/go-redis/redis/v8/.golangci.yml
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,24 @@ | ||||
| run: | ||||
|   concurrency: 8 | ||||
|   deadline: 5m | ||||
|   tests: false | ||||
| linters: | ||||
|   enable-all: true | ||||
|   disable: | ||||
|     - funlen | ||||
|     - gochecknoglobals | ||||
|     - gochecknoinits | ||||
|     - gocognit | ||||
|     - goconst | ||||
|     - godox | ||||
|     - gosec | ||||
|     - maligned | ||||
|     - wsl | ||||
|     - gomnd | ||||
|     - goerr113 | ||||
|     - exhaustive | ||||
|     - nestif | ||||
|     - nlreturn | ||||
|     - exhaustivestruct | ||||
|     - wrapcheck | ||||
|     - errorlint | ||||
							
								
								
									
										4
									
								
								vendor/github.com/go-redis/redis/v8/.prettierrc
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										4
									
								
								vendor/github.com/go-redis/redis/v8/.prettierrc
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,4 @@ | ||||
| semi: false | ||||
| singleQuote: true | ||||
| proseWrap: always | ||||
| printWidth: 100 | ||||
							
								
								
									
										20
									
								
								vendor/github.com/go-redis/redis/v8/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								vendor/github.com/go-redis/redis/v8/.travis.yml
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,20 @@ | ||||
| dist: xenial | ||||
| language: go | ||||
|  | ||||
| services: | ||||
|   - redis-server | ||||
|  | ||||
| go: | ||||
|   - 1.14.x | ||||
|   - 1.15.x | ||||
|   - tip | ||||
|  | ||||
| matrix: | ||||
|   allow_failures: | ||||
|     - go: tip | ||||
|  | ||||
| go_import_path: github.com/go-redis/redis | ||||
|  | ||||
| before_install: | ||||
|   - curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- | ||||
|     -b $(go env GOPATH)/bin v1.32.2 | ||||
							
								
								
									
										5
									
								
								vendor/github.com/go-redis/redis/v8/CHANGELOG.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										5
									
								
								vendor/github.com/go-redis/redis/v8/CHANGELOG.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,5 @@ | ||||
| # Changelog | ||||
|  | ||||
| > :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev) | ||||
|  | ||||
| See https://redis.uptrace.dev/changelog/ | ||||
							
								
								
									
										25
									
								
								vendor/github.com/go-redis/redis/v8/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										25
									
								
								vendor/github.com/go-redis/redis/v8/LICENSE
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,25 @@ | ||||
| Copyright (c) 2013 The github.com/go-redis/redis Authors. | ||||
| All rights reserved. | ||||
|  | ||||
| Redistribution and use in source and binary forms, with or without | ||||
| modification, are permitted provided that the following conditions are | ||||
| met: | ||||
|  | ||||
|    * Redistributions of source code must retain the above copyright | ||||
| notice, this list of conditions and the following disclaimer. | ||||
|    * Redistributions in binary form must reproduce the above | ||||
| copyright notice, this list of conditions and the following disclaimer | ||||
| in the documentation and/or other materials provided with the | ||||
| distribution. | ||||
|  | ||||
| THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | ||||
| "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | ||||
| LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | ||||
| A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | ||||
| OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | ||||
| SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | ||||
| LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | ||||
| DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | ||||
| THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | ||||
| (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | ||||
| OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | ||||
							
								
								
									
										27
									
								
								vendor/github.com/go-redis/redis/v8/Makefile
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										27
									
								
								vendor/github.com/go-redis/redis/v8/Makefile
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,27 @@ | ||||
| all: testdeps | ||||
| 	go test ./... | ||||
| 	go test ./... -short -race | ||||
| 	go test ./... -run=NONE -bench=. -benchmem | ||||
| 	env GOOS=linux GOARCH=386 go test ./... | ||||
| 	go vet | ||||
| 	golangci-lint run | ||||
|  | ||||
| testdeps: testdata/redis/src/redis-server | ||||
|  | ||||
| bench: testdeps | ||||
| 	go test ./... -test.run=NONE -test.bench=. -test.benchmem | ||||
|  | ||||
| .PHONY: all test testdeps bench | ||||
|  | ||||
| testdata/redis: | ||||
| 	mkdir -p $@ | ||||
| 	wget -qO- http://download.redis.io/redis-stable.tar.gz | tar xvz --strip-components=1 -C $@ | ||||
|  | ||||
| testdata/redis/src/redis-server: testdata/redis | ||||
| 	cd $< && make all | ||||
|  | ||||
| tag: | ||||
| 	git tag $(VERSION) | ||||
| 	git tag extra/rediscmd/$(VERSION) | ||||
| 	git tag extra/redisotel/$(VERSION) | ||||
| 	git tag extra/rediscensus/$(VERSION) | ||||
							
								
								
									
										159
									
								
								vendor/github.com/go-redis/redis/v8/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										159
									
								
								vendor/github.com/go-redis/redis/v8/README.md
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,159 @@ | ||||
| # Redis client for Golang | ||||
|  | ||||
| [](https://travis-ci.org/go-redis/redis) | ||||
| [](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) | ||||
| [](https://redis.uptrace.dev/) | ||||
| [](https://discord.gg/rWtp5Aj) | ||||
|  | ||||
| > :heart: [**Uptrace.dev** - distributed traces, logs, and errors in one place](https://uptrace.dev) | ||||
|  | ||||
| - Join [Discord](https://discord.gg/rWtp5Aj) to ask questions. | ||||
| - [Documentation](https://redis.uptrace.dev) | ||||
| - [Reference](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc) | ||||
| - [Examples](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#pkg-examples) | ||||
| - [RealWorld example app](https://github.com/uptrace/go-treemux-realworld-example-app) | ||||
|  | ||||
| ## Ecosystem | ||||
|  | ||||
| - [Redis Mock](https://github.com/go-redis/redismock). | ||||
| - [Distributed Locks](https://github.com/bsm/redislock). | ||||
| - [Redis Cache](https://github.com/go-redis/cache). | ||||
| - [Rate limiting](https://github.com/go-redis/redis_rate). | ||||
|  | ||||
| ## Features | ||||
|  | ||||
| - Redis 3 commands except QUIT, MONITOR, and SYNC. | ||||
| - Automatic connection pooling with | ||||
|   [circuit breaker](https://en.wikipedia.org/wiki/Circuit_breaker_design_pattern) support. | ||||
| - [Pub/Sub](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#PubSub). | ||||
| - [Transactions](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). | ||||
| - [Pipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-Pipeline) and | ||||
|   [TxPipeline](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-Client-TxPipeline). | ||||
| - [Scripting](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Script). | ||||
| - [Timeouts](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#Options). | ||||
| - [Redis Sentinel](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewFailoverClient). | ||||
| - [Redis Cluster](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewClusterClient). | ||||
| - [Cluster of Redis Servers](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#example-NewClusterClient--ManualSetup) | ||||
|   without using cluster mode and Redis Sentinel. | ||||
| - [Ring](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#NewRing). | ||||
| - [Instrumentation](https://pkg.go.dev/github.com/go-redis/redis/v8?tab=doc#ex-package--Instrumentation). | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| go-redis supports 2 last Go versions and requires a Go version with | ||||
| [modules](https://github.com/golang/go/wiki/Modules) support. So make sure to initialize a Go | ||||
| module: | ||||
|  | ||||
| ```shell | ||||
| go mod init github.com/my/repo | ||||
| ``` | ||||
|  | ||||
| And then install go-redis/v8 (note _v8_ in the import; omitting it is a popular mistake): | ||||
|  | ||||
| ```shell | ||||
| go get github.com/go-redis/redis/v8 | ||||
| ``` | ||||
|  | ||||
| ## Quickstart | ||||
|  | ||||
| ```go | ||||
| import ( | ||||
|     "context" | ||||
|     "github.com/go-redis/redis/v8" | ||||
| ) | ||||
|  | ||||
| var ctx = context.Background() | ||||
|  | ||||
| func ExampleClient() { | ||||
|     rdb := redis.NewClient(&redis.Options{ | ||||
|         Addr:     "localhost:6379", | ||||
|         Password: "", // no password set | ||||
|         DB:       0,  // use default DB | ||||
|     }) | ||||
|  | ||||
|     err := rdb.Set(ctx, "key", "value", 0).Err() | ||||
|     if err != nil { | ||||
|         panic(err) | ||||
|     } | ||||
|  | ||||
|     val, err := rdb.Get(ctx, "key").Result() | ||||
|     if err != nil { | ||||
|         panic(err) | ||||
|     } | ||||
|     fmt.Println("key", val) | ||||
|  | ||||
|     val2, err := rdb.Get(ctx, "key2").Result() | ||||
|     if err == redis.Nil { | ||||
|         fmt.Println("key2 does not exist") | ||||
|     } else if err != nil { | ||||
|         panic(err) | ||||
|     } else { | ||||
|         fmt.Println("key2", val2) | ||||
|     } | ||||
|     // Output: key value | ||||
|     // key2 does not exist | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ## Look and feel | ||||
|  | ||||
| Some corner cases: | ||||
|  | ||||
| ```go | ||||
| // SET key value EX 10 NX | ||||
| set, err := rdb.SetNX(ctx, "key", "value", 10*time.Second).Result() | ||||
|  | ||||
| // SET key value keepttl NX | ||||
| set, err := rdb.SetNX(ctx, "key", "value", redis.KeepTTL).Result() | ||||
|  | ||||
| // SORT list LIMIT 0 2 ASC | ||||
| vals, err := rdb.Sort(ctx, "list", &redis.Sort{Offset: 0, Count: 2, Order: "ASC"}).Result() | ||||
|  | ||||
| // ZRANGEBYSCORE zset -inf +inf WITHSCORES LIMIT 0 2 | ||||
| vals, err := rdb.ZRangeByScoreWithScores(ctx, "zset", &redis.ZRangeBy{ | ||||
|     Min: "-inf", | ||||
|     Max: "+inf", | ||||
|     Offset: 0, | ||||
|     Count: 2, | ||||
| }).Result() | ||||
|  | ||||
| // ZINTERSTORE out 2 zset1 zset2 WEIGHTS 2 3 AGGREGATE SUM | ||||
| vals, err := rdb.ZInterStore(ctx, "out", &redis.ZStore{ | ||||
|     Keys: []string{"zset1", "zset2"}, | ||||
|     Weights: []int64{2, 3} | ||||
| }).Result() | ||||
|  | ||||
| // EVAL "return {KEYS[1],ARGV[1]}" 1 "key" "hello" | ||||
| vals, err := rdb.Eval(ctx, "return {KEYS[1],ARGV[1]}", []string{"key"}, "hello").Result() | ||||
|  | ||||
| // custom command | ||||
| res, err := rdb.Do(ctx, "set", "key", "value").Result() | ||||
| ``` | ||||
| ## Run the test | ||||
| go-redis will start a redis-server and run the test cases.  | ||||
|  | ||||
| The paths of redis-server bin file and redis config file are definded in `main_test.go`: | ||||
| ``` | ||||
| var ( | ||||
| 	redisServerBin, _  = filepath.Abs(filepath.Join("testdata", "redis", "src", "redis-server")) | ||||
| 	redisServerConf, _ = filepath.Abs(filepath.Join("testdata", "redis", "redis.conf")) | ||||
| ) | ||||
| ``` | ||||
|  | ||||
| For local testing, you can change the variables to refer to your local files, or create a soft link to the corresponding folder for redis-server and copy the config file to `testdata/redis/`: | ||||
| ``` | ||||
| ln -s /usr/bin/redis-server ./go-redis/testdata/redis/src | ||||
| cp ./go-redis/testdata/redis.conf ./go-redis/testdata/redis/ | ||||
| ``` | ||||
|  | ||||
| Lastly, run: | ||||
| ``` | ||||
| go test | ||||
| ``` | ||||
|  | ||||
| ## See also | ||||
|  | ||||
| - [Fast and flexible HTTP router](https://github.com/vmihailenco/treemux) | ||||
| - [Golang PostgreSQL ORM](https://github.com/go-pg/pg) | ||||
| - [Golang msgpack](https://github.com/vmihailenco/msgpack) | ||||
| - [Golang message task queue](https://github.com/vmihailenco/taskq) | ||||
							
								
								
									
										1734
									
								
								vendor/github.com/go-redis/redis/v8/cluster.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										1734
									
								
								vendor/github.com/go-redis/redis/v8/cluster.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										25
									
								
								vendor/github.com/go-redis/redis/v8/cluster_commands.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										25
									
								
								vendor/github.com/go-redis/redis/v8/cluster_commands.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,25 @@ | ||||
| package redis | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"sync/atomic" | ||||
| ) | ||||
|  | ||||
| func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd { | ||||
| 	cmd := NewIntCmd(ctx, "dbsize") | ||||
| 	var size int64 | ||||
| 	err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error { | ||||
| 		n, err := master.DBSize(ctx).Result() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		atomic.AddInt64(&size, n) | ||||
| 		return nil | ||||
| 	}) | ||||
| 	if err != nil { | ||||
| 		cmd.SetErr(err) | ||||
| 		return cmd | ||||
| 	} | ||||
| 	cmd.val = size | ||||
| 	return cmd | ||||
| } | ||||
							
								
								
									
										2438
									
								
								vendor/github.com/go-redis/redis/v8/command.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										2438
									
								
								vendor/github.com/go-redis/redis/v8/command.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										2790
									
								
								vendor/github.com/go-redis/redis/v8/commands.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										2790
									
								
								vendor/github.com/go-redis/redis/v8/commands.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
										
											
												File diff suppressed because it is too large
												Load Diff
											
										
									
								
							
							
								
								
									
										4
									
								
								vendor/github.com/go-redis/redis/v8/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										4
									
								
								vendor/github.com/go-redis/redis/v8/doc.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,4 @@ | ||||
| /* | ||||
| Package redis implements a Redis client. | ||||
| */ | ||||
| package redis | ||||
							
								
								
									
										125
									
								
								vendor/github.com/go-redis/redis/v8/error.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										125
									
								
								vendor/github.com/go-redis/redis/v8/error.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,125 @@ | ||||
| package redis | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"io" | ||||
| 	"net" | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal/pool" | ||||
| 	"github.com/go-redis/redis/v8/internal/proto" | ||||
| ) | ||||
|  | ||||
| var ErrClosed = pool.ErrClosed | ||||
|  | ||||
| type Error interface { | ||||
| 	error | ||||
|  | ||||
| 	// RedisError is a no-op function but | ||||
| 	// serves to distinguish types that are Redis | ||||
| 	// errors from ordinary errors: a type is a | ||||
| 	// Redis error if it has a RedisError method. | ||||
| 	RedisError() | ||||
| } | ||||
|  | ||||
| var _ Error = proto.RedisError("") | ||||
|  | ||||
| func shouldRetry(err error, retryTimeout bool) bool { | ||||
| 	switch err { | ||||
| 	case io.EOF, io.ErrUnexpectedEOF: | ||||
| 		return true | ||||
| 	case nil, context.Canceled, context.DeadlineExceeded: | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	if v, ok := err.(timeoutError); ok { | ||||
| 		if v.Timeout() { | ||||
| 			return retryTimeout | ||||
| 		} | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	s := err.Error() | ||||
| 	if s == "ERR max number of clients reached" { | ||||
| 		return true | ||||
| 	} | ||||
| 	if strings.HasPrefix(s, "LOADING ") { | ||||
| 		return true | ||||
| 	} | ||||
| 	if strings.HasPrefix(s, "READONLY ") { | ||||
| 		return true | ||||
| 	} | ||||
| 	if strings.HasPrefix(s, "CLUSTERDOWN ") { | ||||
| 		return true | ||||
| 	} | ||||
| 	if strings.HasPrefix(s, "TRYAGAIN ") { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| func isRedisError(err error) bool { | ||||
| 	_, ok := err.(proto.RedisError) | ||||
| 	return ok | ||||
| } | ||||
|  | ||||
| func isBadConn(err error, allowTimeout bool) bool { | ||||
| 	switch err { | ||||
| 	case nil: | ||||
| 		return false | ||||
| 	case context.Canceled, context.DeadlineExceeded: | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	if isRedisError(err) { | ||||
| 		// Close connections in read only state in case domain addr is used | ||||
| 		// and domain resolves to a different Redis Server. See #790. | ||||
| 		return isReadOnlyError(err) | ||||
| 	} | ||||
|  | ||||
| 	if allowTimeout { | ||||
| 		if netErr, ok := err.(net.Error); ok && netErr.Timeout() { | ||||
| 			return !netErr.Temporary() | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| func isMovedError(err error) (moved bool, ask bool, addr string) { | ||||
| 	if !isRedisError(err) { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	s := err.Error() | ||||
| 	switch { | ||||
| 	case strings.HasPrefix(s, "MOVED "): | ||||
| 		moved = true | ||||
| 	case strings.HasPrefix(s, "ASK "): | ||||
| 		ask = true | ||||
| 	default: | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	ind := strings.LastIndex(s, " ") | ||||
| 	if ind == -1 { | ||||
| 		return false, false, "" | ||||
| 	} | ||||
| 	addr = s[ind+1:] | ||||
| 	return | ||||
| } | ||||
|  | ||||
| func isLoadingError(err error) bool { | ||||
| 	return strings.HasPrefix(err.Error(), "LOADING ") | ||||
| } | ||||
|  | ||||
| func isReadOnlyError(err error) bool { | ||||
| 	return strings.HasPrefix(err.Error(), "READONLY ") | ||||
| } | ||||
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
| type timeoutError interface { | ||||
| 	Timeout() bool | ||||
| } | ||||
							
								
								
									
										11
									
								
								vendor/github.com/go-redis/redis/v8/go.mod
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										11
									
								
								vendor/github.com/go-redis/redis/v8/go.mod
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,11 @@ | ||||
| module github.com/go-redis/redis/v8 | ||||
|  | ||||
| go 1.13 | ||||
|  | ||||
| require ( | ||||
| 	github.com/cespare/xxhash/v2 v2.1.1 | ||||
| 	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f | ||||
| 	github.com/onsi/ginkgo v1.15.0 | ||||
| 	github.com/onsi/gomega v1.10.5 | ||||
| 	go.opentelemetry.io/otel v0.16.0 | ||||
| ) | ||||
							
								
								
									
										97
									
								
								vendor/github.com/go-redis/redis/v8/go.sum
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										97
									
								
								vendor/github.com/go-redis/redis/v8/go.sum
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,97 @@ | ||||
| github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= | ||||
| github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= | ||||
| github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= | ||||
| github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= | ||||
| github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= | ||||
| github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= | ||||
| github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= | ||||
| github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= | ||||
| github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= | ||||
| github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= | ||||
| github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= | ||||
| github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= | ||||
| github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= | ||||
| github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= | ||||
| github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= | ||||
| github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= | ||||
| github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= | ||||
| github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= | ||||
| github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= | ||||
| github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | ||||
| github.com/google/go-cmp v0.5.4 h1:L8R9j+yAqZuZjsqh/z+F1NCffTKKLShY6zXTItVIZ8M= | ||||
| github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= | ||||
| github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= | ||||
| github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= | ||||
| github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= | ||||
| github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= | ||||
| github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= | ||||
| github.com/onsi/ginkgo v1.14.2 h1:8mVmC9kjFFmA8H4pKMUhcblgifdkOIXPvbhN1T36q1M= | ||||
| github.com/onsi/ginkgo v1.14.2/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= | ||||
| github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= | ||||
| github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= | ||||
| github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= | ||||
| github.com/onsi/gomega v1.10.4 h1:NiTx7EEvBzu9sFOD1zORteLSt3o8gnlvZZwSE9TnY9U= | ||||
| github.com/onsi/gomega v1.10.4/go.mod h1:g/HbgYopi++010VEqkFgJHKC09uJiW9UkXvMUuKHUCQ= | ||||
| github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= | ||||
| github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= | ||||
| github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= | ||||
| github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= | ||||
| github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= | ||||
| github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= | ||||
| github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= | ||||
| go.opentelemetry.io/otel v0.16.0 h1:uIWEbdeb4vpKPGITLsRVUS44L5oDbDUCZxn8lkxhmgw= | ||||
| go.opentelemetry.io/otel v0.16.0/go.mod h1:e4GKElweB8W2gWUqbghw0B8t5MCTccc9212eNHnOHwA= | ||||
| golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= | ||||
| golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= | ||||
| golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= | ||||
| golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= | ||||
| golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= | ||||
| golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= | ||||
| golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= | ||||
| golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= | ||||
| golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | ||||
| golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb h1:eBmm0M9fYhWpKZLjQUUKka/LtIxf46G4fxeEz5KJr9U= | ||||
| golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= | ||||
| golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= | ||||
| golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= | ||||
| golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= | ||||
| golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= | ||||
| golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= | ||||
| golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= | ||||
| golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= | ||||
| golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= | ||||
| golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= | ||||
| golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= | ||||
| golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= | ||||
| golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= | ||||
| golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= | ||||
| google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= | ||||
| google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= | ||||
| google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= | ||||
| google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= | ||||
| google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= | ||||
| google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= | ||||
| google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= | ||||
| gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= | ||||
| gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= | ||||
| gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= | ||||
| gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= | ||||
| gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= | ||||
| gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | ||||
| gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= | ||||
| gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= | ||||
| gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= | ||||
| gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= | ||||
							
								
								
									
										56
									
								
								vendor/github.com/go-redis/redis/v8/internal/arg.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										56
									
								
								vendor/github.com/go-redis/redis/v8/internal/arg.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,56 @@ | ||||
| package internal | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
| func AppendArg(b []byte, v interface{}) []byte { | ||||
| 	switch v := v.(type) { | ||||
| 	case nil: | ||||
| 		return append(b, "<nil>"...) | ||||
| 	case string: | ||||
| 		return appendUTF8String(b, Bytes(v)) | ||||
| 	case []byte: | ||||
| 		return appendUTF8String(b, v) | ||||
| 	case int: | ||||
| 		return strconv.AppendInt(b, int64(v), 10) | ||||
| 	case int8: | ||||
| 		return strconv.AppendInt(b, int64(v), 10) | ||||
| 	case int16: | ||||
| 		return strconv.AppendInt(b, int64(v), 10) | ||||
| 	case int32: | ||||
| 		return strconv.AppendInt(b, int64(v), 10) | ||||
| 	case int64: | ||||
| 		return strconv.AppendInt(b, v, 10) | ||||
| 	case uint: | ||||
| 		return strconv.AppendUint(b, uint64(v), 10) | ||||
| 	case uint8: | ||||
| 		return strconv.AppendUint(b, uint64(v), 10) | ||||
| 	case uint16: | ||||
| 		return strconv.AppendUint(b, uint64(v), 10) | ||||
| 	case uint32: | ||||
| 		return strconv.AppendUint(b, uint64(v), 10) | ||||
| 	case uint64: | ||||
| 		return strconv.AppendUint(b, v, 10) | ||||
| 	case float32: | ||||
| 		return strconv.AppendFloat(b, float64(v), 'f', -1, 64) | ||||
| 	case float64: | ||||
| 		return strconv.AppendFloat(b, v, 'f', -1, 64) | ||||
| 	case bool: | ||||
| 		if v { | ||||
| 			return append(b, "true"...) | ||||
| 		} | ||||
| 		return append(b, "false"...) | ||||
| 	case time.Time: | ||||
| 		return v.AppendFormat(b, time.RFC3339Nano) | ||||
| 	default: | ||||
| 		return append(b, fmt.Sprint(v)...) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// appendUTF8String appends src to dst unchanged and returns the grown
// buffer.
func appendUTF8String(dst []byte, src []byte) []byte {
	return append(dst, src...)
}
							
								
								
									
										78
									
								
								vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										78
									
								
								vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,78 @@ | ||||
| package hashtag | ||||
|  | ||||
| import ( | ||||
| 	"strings" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal/rand" | ||||
| ) | ||||
|  | ||||
// slotNumber is the fixed number of hash slots in a Redis Cluster (2^14).
const slotNumber = 16384

// CRC16 implementation according to CCITT standards.
// Copyright 2001-2010 Georges Menie (www.menie.org)
// Copyright 2013 The Go Authors. All rights reserved.
// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
//
// crc16tab is the 256-entry lookup table consumed by crc16sum.
var crc16tab = [256]uint16{
	0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
	0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
	0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
	0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
	0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
	0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
	0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
	0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
	0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
	0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
	0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
	0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
	0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
	0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
	0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
	0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
	0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
	0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
	0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
	0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
	0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
	0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
	0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
	0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
	0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
	0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
	0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
	0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
	0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
	0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
	0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
	0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
}
|  | ||||
// Key extracts the cluster hash tag from key: the text between the first
// '{' and the next '}' after it. If there is no such non-empty tag the
// whole key is returned (so "foo{}bar" hashes as-is).
func Key(key string) string {
	start := strings.IndexByte(key, '{')
	if start == -1 {
		return key
	}
	rest := key[start+1:]
	end := strings.IndexByte(rest, '}')
	if end <= 0 {
		return key
	}
	return rest[:end]
}
|  | ||||
| func RandomSlot() int { | ||||
| 	return rand.Intn(slotNumber) | ||||
| } | ||||
|  | ||||
| // hashSlot returns a consistent slot number between 0 and 16383 | ||||
| // for any given string key. | ||||
| func Slot(key string) int { | ||||
| 	if key == "" { | ||||
| 		return RandomSlot() | ||||
| 	} | ||||
| 	key = Key(key) | ||||
| 	return int(crc16sum(key)) % slotNumber | ||||
| } | ||||
|  | ||||
| func crc16sum(key string) (crc uint16) { | ||||
| 	for i := 0; i < len(key); i++ { | ||||
| 		crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff] | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
							
								
								
									
										151
									
								
								vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										151
									
								
								vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,151 @@ | ||||
| package hscan | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"reflect" | ||||
| 	"strconv" | ||||
| ) | ||||
|  | ||||
// decoderFunc represents decoding functions for default built-in types.
type decoderFunc func(reflect.Value, string) error

var (
	// List of built-in decoders indexed by their numeric constant values (eg: reflect.Bool = 1).
	// Kinds that cannot be populated from a single Redis string reply map to
	// decodeUnsupported, which reports an error when reached.
	decoders = []decoderFunc{
		reflect.Bool:          decodeBool,
		reflect.Int:           decodeInt,
		reflect.Int8:          decodeInt,
		reflect.Int16:         decodeInt,
		reflect.Int32:         decodeInt,
		reflect.Int64:         decodeInt,
		reflect.Uint:          decodeUint,
		reflect.Uint8:         decodeUint,
		reflect.Uint16:        decodeUint,
		reflect.Uint32:        decodeUint,
		reflect.Uint64:        decodeUint,
		reflect.Float32:       decodeFloat,
		reflect.Float64:       decodeFloat,
		reflect.Complex64:     decodeUnsupported,
		reflect.Complex128:    decodeUnsupported,
		reflect.Array:         decodeUnsupported,
		reflect.Chan:          decodeUnsupported,
		reflect.Func:          decodeUnsupported,
		reflect.Interface:     decodeUnsupported,
		reflect.Map:           decodeUnsupported,
		reflect.Ptr:           decodeUnsupported,
		reflect.Slice:         decodeSlice,
		reflect.String:        decodeString,
		reflect.Struct:        decodeUnsupported,
		reflect.UnsafePointer: decodeUnsupported,
	}

	// Global map of struct field specs that is populated once for every new
	// struct type that is scanned. This caches the field types and the corresponding
	// decoder functions to avoid iterating through struct fields on subsequent scans.
	globalStructMap = newStructMap()
)
|  | ||||
| func Struct(dst interface{}) (StructValue, error) { | ||||
| 	v := reflect.ValueOf(dst) | ||||
|  | ||||
| 	// The dstination to scan into should be a struct pointer. | ||||
| 	if v.Kind() != reflect.Ptr || v.IsNil() { | ||||
| 		return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst) | ||||
| 	} | ||||
|  | ||||
| 	v = v.Elem() | ||||
| 	if v.Kind() != reflect.Struct { | ||||
| 		return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst) | ||||
| 	} | ||||
|  | ||||
| 	return StructValue{ | ||||
| 		spec:  globalStructMap.get(v.Type()), | ||||
| 		value: v, | ||||
| 	}, nil | ||||
| } | ||||
|  | ||||
| // Scan scans the results from a key-value Redis map result set to a destination struct. | ||||
| // The Redis keys are matched to the struct's field with the `redis` tag. | ||||
| func Scan(dst interface{}, keys []interface{}, vals []interface{}) error { | ||||
| 	if len(keys) != len(vals) { | ||||
| 		return errors.New("args should have the same number of keys and vals") | ||||
| 	} | ||||
|  | ||||
| 	strct, err := Struct(dst) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	// Iterate through the (key, value) sequence. | ||||
| 	for i := 0; i < len(vals); i++ { | ||||
| 		key, ok := keys[i].(string) | ||||
| 		if !ok { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		val, ok := vals[i].(string) | ||||
| 		if !ok { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		if err := strct.Scan(key, val); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// decodeBool parses s with strconv.ParseBool and stores the result in f.
func decodeBool(f reflect.Value, s string) error {
	v, err := strconv.ParseBool(s)
	if err == nil {
		f.SetBool(v)
	}
	return err
}
|  | ||||
// decodeInt parses s as a base-10, platform-int-sized integer and stores
// it in f.
func decodeInt(f reflect.Value, s string) error {
	n, err := strconv.ParseInt(s, 10, 0)
	if err == nil {
		f.SetInt(n)
	}
	return err
}
|  | ||||
// decodeUint parses s as a base-10, platform-uint-sized unsigned integer
// and stores it in f.
func decodeUint(f reflect.Value, s string) error {
	n, err := strconv.ParseUint(s, 10, 0)
	if err == nil {
		f.SetUint(n)
	}
	return err
}
|  | ||||
// decodeFloat parses s as a 64-bit float and stores it in f (SetFloat
// converts as needed for float32 fields).
//
// bitSize is 64 rather than the original 0: strconv.ParseFloat's contract
// only defines 32 and 64, and 0 merely happened to behave like 64.
func decodeFloat(f reflect.Value, s string) error {
	v, err := strconv.ParseFloat(s, 64)
	if err != nil {
		return err
	}
	f.SetFloat(v)
	return nil
}
|  | ||||
// decodeString stores s into the string field f; it never fails.
func decodeString(f reflect.Value, s string) error {
	f.SetString(s)
	return nil
}
|  | ||||
// decodeSlice stores s into f when f is a byte slice ([]uint8). Slices of
// any other element type are left untouched and no error is reported.
func decodeSlice(f reflect.Value, s string) error {
	if f.Type().Elem().Kind() == reflect.Uint8 {
		f.SetBytes([]byte(s))
	}
	return nil
}
|  | ||||
// decodeUnsupported rejects kinds that cannot be populated from a single
// Redis string reply.
func decodeUnsupported(v reflect.Value, _ string) error {
	return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
}
							
								
								
									
										87
									
								
								vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										87
									
								
								vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,87 @@ | ||||
| package hscan | ||||
|  | ||||
| import ( | ||||
| 	"reflect" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
| // structMap contains the map of struct fields for target structs | ||||
| // indexed by the struct type. | ||||
| type structMap struct { | ||||
| 	m sync.Map | ||||
| } | ||||
|  | ||||
| func newStructMap() *structMap { | ||||
| 	return new(structMap) | ||||
| } | ||||
|  | ||||
| func (s *structMap) get(t reflect.Type) *structSpec { | ||||
| 	if v, ok := s.m.Load(t); ok { | ||||
| 		return v.(*structSpec) | ||||
| 	} | ||||
|  | ||||
| 	spec := newStructSpec(t, "redis") | ||||
| 	s.m.Store(t, spec) | ||||
| 	return spec | ||||
| } | ||||
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
| // structSpec contains the list of all fields in a target struct. | ||||
| type structSpec struct { | ||||
| 	m map[string]*structField | ||||
| } | ||||
|  | ||||
| func (s *structSpec) set(tag string, sf *structField) { | ||||
| 	s.m[tag] = sf | ||||
| } | ||||
|  | ||||
| func newStructSpec(t reflect.Type, fieldTag string) *structSpec { | ||||
| 	out := &structSpec{ | ||||
| 		m: make(map[string]*structField), | ||||
| 	} | ||||
|  | ||||
| 	num := t.NumField() | ||||
| 	for i := 0; i < num; i++ { | ||||
| 		f := t.Field(i) | ||||
|  | ||||
| 		tag := f.Tag.Get(fieldTag) | ||||
| 		if tag == "" || tag == "-" { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		tag = strings.Split(tag, ",")[0] | ||||
| 		if tag == "" { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		// Use the built-in decoder. | ||||
| 		out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]}) | ||||
| 	} | ||||
|  | ||||
| 	return out | ||||
| } | ||||
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// structField represents a single field in a target struct.
type structField struct {
	index int         // reflect field index within the struct
	fn    decoderFunc // decoder selected for the field's reflect.Kind
}
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// StructValue pairs a struct reflect.Value with the cached field spec for
// its type, as produced by Struct.
type StructValue struct {
	spec  *structSpec
	value reflect.Value
}

// Scan decodes value into the struct field tagged with key.
// Keys with no matching tagged field are silently ignored.
func (s StructValue) Scan(key string, value string) error {
	field, ok := s.spec.m[key]
	if !ok {
		return nil
	}
	return field.fn(s.value.Field(field.index), value)
}
							
								
								
									
										33
									
								
								vendor/github.com/go-redis/redis/v8/internal/instruments.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										33
									
								
								vendor/github.com/go-redis/redis/v8/internal/instruments.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,33 @@ | ||||
| package internal | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
|  | ||||
| 	"go.opentelemetry.io/otel" | ||||
| 	"go.opentelemetry.io/otel/metric" | ||||
| ) | ||||
|  | ||||
var (
	// WritesCounter is a count of write commands performed.
	// It is created in init below.
	WritesCounter metric.Int64Counter
	// NewConnectionsCounter is a count of new connections.
	// It is created in init below.
	NewConnectionsCounter metric.Int64Counter
)
|  | ||||
| func init() { | ||||
| 	defer func() { | ||||
| 		if r := recover(); r != nil { | ||||
| 			Logger.Printf(context.Background(), "Error creating meter github.com/go-redis/redis for Instruments", r) | ||||
| 		} | ||||
| 	}() | ||||
|  | ||||
| 	meter := metric.Must(otel.Meter("github.com/go-redis/redis")) | ||||
|  | ||||
| 	WritesCounter = meter.NewInt64Counter("redis.writes", | ||||
| 		metric.WithDescription("the number of writes initiated"), | ||||
| 	) | ||||
|  | ||||
| 	NewConnectionsCounter = meter.NewInt64Counter("redis.new_connections", | ||||
| 		metric.WithDescription("the number of connections created"), | ||||
| 	) | ||||
| } | ||||
							
								
								
									
										29
									
								
								vendor/github.com/go-redis/redis/v8/internal/internal.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										29
									
								
								vendor/github.com/go-redis/redis/v8/internal/internal.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,29 @@ | ||||
| package internal | ||||
|  | ||||
| import ( | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal/rand" | ||||
| ) | ||||
|  | ||||
| func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration { | ||||
| 	if retry < 0 { | ||||
| 		panic("not reached") | ||||
| 	} | ||||
| 	if minBackoff == 0 { | ||||
| 		return 0 | ||||
| 	} | ||||
|  | ||||
| 	d := minBackoff << uint(retry) | ||||
| 	if d < minBackoff { | ||||
| 		return maxBackoff | ||||
| 	} | ||||
|  | ||||
| 	d = minBackoff + time.Duration(rand.Int63n(int64(d))) | ||||
|  | ||||
| 	if d > maxBackoff || d < minBackoff { | ||||
| 		d = maxBackoff | ||||
| 	} | ||||
|  | ||||
| 	return d | ||||
| } | ||||
							
								
								
									
										24
									
								
								vendor/github.com/go-redis/redis/v8/internal/log.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										24
									
								
								vendor/github.com/go-redis/redis/v8/internal/log.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,24 @@ | ||||
| package internal | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"log" | ||||
| 	"os" | ||||
| ) | ||||
|  | ||||
// Logging is the pluggable logger interface used throughout the package;
// replace the package-level Logger to redirect output.
type Logging interface {
	Printf(ctx context.Context, format string, v ...interface{})
}

// logger adapts a standard library *log.Logger to the Logging interface.
type logger struct {
	log *log.Logger
}

// Printf formats the message and emits it via the wrapped log.Logger.
// Call depth 2 makes log.Lshortfile report the caller's file:line rather
// than this adapter's. The Output error is deliberately discarded, and
// ctx is currently unused.
func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
	_ = l.log.Output(2, fmt.Sprintf(format, v...))
}

// Logger is the package-wide default: stderr with a "redis: " prefix,
// timestamps, and short file locations.
var Logger Logging = &logger{
	log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
}
							
								
								
									
										60
									
								
								vendor/github.com/go-redis/redis/v8/internal/once.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										60
									
								
								vendor/github.com/go-redis/redis/v8/internal/once.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,60 @@ | ||||
| /* | ||||
| Copyright 2014 The Camlistore Authors | ||||
|  | ||||
| Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| you may not use this file except in compliance with the License. | ||||
| You may obtain a copy of the License at | ||||
|  | ||||
|      http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
| Unless required by applicable law or agreed to in writing, software | ||||
| distributed under the License is distributed on an "AS IS" BASIS, | ||||
| WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| See the License for the specific language governing permissions and | ||||
| limitations under the License. | ||||
| */ | ||||
|  | ||||
| package internal | ||||
|  | ||||
| import ( | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
| ) | ||||
|  | ||||
// A Once will perform a successful action exactly once.
//
// Unlike a sync.Once, this Once's func returns an error
// and is re-armed on failure.
type Once struct {
	m    sync.Mutex
	done uint32
}

// Do calls f if and only if no previous call on this Once has completed
// without error. Once some call to f returns nil, every subsequent Do
// returns nil immediately without invoking f; while f keeps failing, each
// Do retries it and returns its error.
//
// Do is intended for initialization that must succeed exactly once. Since
// f is niladic, capture arguments with a closure:
// 	err := config.once.Do(func() error { return config.init(filename) })
func (o *Once) Do(f func() error) error {
	// Fast path: already completed successfully.
	if atomic.LoadUint32(&o.done) == 1 {
		return nil
	}

	o.m.Lock()
	defer o.m.Unlock()

	// Re-check under the lock: another goroutine may have succeeded while
	// we were waiting.
	if o.done != 0 {
		return nil
	}

	err := f()
	if err == nil {
		atomic.StoreUint32(&o.done, 1)
	}
	return err
}
							
								
								
									
										136
									
								
								vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										136
									
								
								vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,136 @@ | ||||
| package pool | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"context" | ||||
| 	"net" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal" | ||||
| 	"github.com/go-redis/redis/v8/internal/proto" | ||||
| 	"go.opentelemetry.io/otel/trace" | ||||
| ) | ||||
|  | ||||
// noDeadline is the zero time; passing it to SetReadDeadline /
// SetWriteDeadline clears any deadline on the connection.
var noDeadline = time.Time{}

// Conn wraps a net.Conn together with buffered protocol reader/writer
// state and pool bookkeeping.
type Conn struct {
	usedAt  int64 // atomic
	netConn net.Conn

	rd *proto.Reader // protocol reader over netConn
	bw *bufio.Writer // buffered writer over netConn; wr writes through it
	wr *proto.Writer

	Inited    bool // NOTE(review): appears to mark post-dial initialization — set outside this file, confirm with callers
	pooled    bool
	createdAt time.Time // presumably checked against Options.MaxConnAge — confirm in pool code
}
|  | ||||
| func NewConn(netConn net.Conn) *Conn { | ||||
| 	cn := &Conn{ | ||||
| 		netConn:   netConn, | ||||
| 		createdAt: time.Now(), | ||||
| 	} | ||||
| 	cn.rd = proto.NewReader(netConn) | ||||
| 	cn.bw = bufio.NewWriter(netConn) | ||||
| 	cn.wr = proto.NewWriter(cn.bw) | ||||
| 	cn.SetUsedAt(time.Now()) | ||||
| 	return cn | ||||
| } | ||||
|  | ||||
| func (cn *Conn) UsedAt() time.Time { | ||||
| 	unix := atomic.LoadInt64(&cn.usedAt) | ||||
| 	return time.Unix(unix, 0) | ||||
| } | ||||
|  | ||||
| func (cn *Conn) SetUsedAt(tm time.Time) { | ||||
| 	atomic.StoreInt64(&cn.usedAt, tm.Unix()) | ||||
| } | ||||
|  | ||||
| func (cn *Conn) SetNetConn(netConn net.Conn) { | ||||
| 	cn.netConn = netConn | ||||
| 	cn.rd.Reset(netConn) | ||||
| 	cn.bw.Reset(netConn) | ||||
| } | ||||
|  | ||||
| func (cn *Conn) Write(b []byte) (int, error) { | ||||
| 	return cn.netConn.Write(b) | ||||
| } | ||||
|  | ||||
| func (cn *Conn) RemoteAddr() net.Addr { | ||||
| 	if cn.netConn != nil { | ||||
| 		return cn.netConn.RemoteAddr() | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error { | ||||
| 	return internal.WithSpan(ctx, "redis.with_reader", func(ctx context.Context, span trace.Span) error { | ||||
| 		if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil { | ||||
| 			return internal.RecordError(ctx, span, err) | ||||
| 		} | ||||
| 		if err := fn(cn.rd); err != nil { | ||||
| 			return internal.RecordError(ctx, span, err) | ||||
| 		} | ||||
| 		return nil | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (cn *Conn) WithWriter( | ||||
| 	ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error, | ||||
| ) error { | ||||
| 	return internal.WithSpan(ctx, "redis.with_writer", func(ctx context.Context, span trace.Span) error { | ||||
| 		if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil { | ||||
| 			return internal.RecordError(ctx, span, err) | ||||
| 		} | ||||
|  | ||||
| 		if cn.bw.Buffered() > 0 { | ||||
| 			cn.bw.Reset(cn.netConn) | ||||
| 		} | ||||
|  | ||||
| 		if err := fn(cn.wr); err != nil { | ||||
| 			return internal.RecordError(ctx, span, err) | ||||
| 		} | ||||
|  | ||||
| 		if err := cn.bw.Flush(); err != nil { | ||||
| 			return internal.RecordError(ctx, span, err) | ||||
| 		} | ||||
|  | ||||
| 		internal.WritesCounter.Add(ctx, 1) | ||||
|  | ||||
| 		return nil | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func (cn *Conn) Close() error { | ||||
| 	return cn.netConn.Close() | ||||
| } | ||||
|  | ||||
| func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time { | ||||
| 	tm := time.Now() | ||||
| 	cn.SetUsedAt(tm) | ||||
|  | ||||
| 	if timeout > 0 { | ||||
| 		tm = tm.Add(timeout) | ||||
| 	} | ||||
|  | ||||
| 	if ctx != nil { | ||||
| 		deadline, ok := ctx.Deadline() | ||||
| 		if ok { | ||||
| 			if timeout == 0 { | ||||
| 				return deadline | ||||
| 			} | ||||
| 			if deadline.Before(tm) { | ||||
| 				return deadline | ||||
| 			} | ||||
| 			return tm | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if timeout > 0 { | ||||
| 		return tm | ||||
| 	} | ||||
|  | ||||
| 	return noDeadline | ||||
| } | ||||
							
								
								
									
										525
									
								
								vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										525
									
								
								vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,525 @@ | ||||
| package pool | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"net" | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal" | ||||
| ) | ||||
|  | ||||
var (
	// ErrClosed is reported for operations attempted after the client/pool
	// has been closed.
	ErrClosed      = errors.New("redis: client is closed")
	// ErrPoolTimeout is reported when waiting for a free connection times out.
	ErrPoolTimeout = errors.New("redis: connection pool timeout")
)

// timers recycles stopped time.Timers so timed waits don't allocate a
// fresh timer each time. Timers come out of the pool already stopped.
var timers = sync.Pool{
	New: func() interface{} {
		t := time.NewTimer(time.Hour)
		t.Stop()
		return t
	},
}

// Stats contains pool state information and accumulated stats.
type Stats struct {
	Hits     uint32 // number of times free connection was found in the pool
	Misses   uint32 // number of times free connection was NOT found in the pool
	Timeouts uint32 // number of times a wait timeout occurred

	TotalConns uint32 // number of total connections in the pool
	IdleConns  uint32 // number of idle connections in the pool
	StaleConns uint32 // number of stale connections removed from the pool
}
|  | ||||
// Pooler is the interface implemented by the connection pools in this
// package (ConnPool, SingleConnPool, StickyConnPool).
type Pooler interface {
	// NewConn returns a connection that is NOT part of pool accounting.
	NewConn(context.Context) (*Conn, error)
	CloseConn(*Conn) error

	// Get/Put/Remove manage pooled connections: every successful Get must
	// be paired with exactly one Put or Remove.
	Get(context.Context) (*Conn, error)
	Put(context.Context, *Conn)
	Remove(context.Context, *Conn, error)

	Len() int
	IdleLen() int
	Stats() *Stats

	Close() error
}

// Options configures a ConnPool.
type Options struct {
	// Dialer creates a new network connection; OnClose, if set, is run
	// before a connection is closed.
	Dialer  func(context.Context) (net.Conn, error)
	OnClose func(*Conn) error

	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration // max time Get waits for a free slot
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration
}
|  | ||||
// lastDialErrorWrap wraps the last dial error so atomic.Value always
// stores the same concrete type.
type lastDialErrorWrap struct {
	err error
}

// ConnPool is a connection pool with at most PoolSize connections and
// semaphore-based ("turn") admission control.
type ConnPool struct {
	opt *Options

	dialErrorsNum uint32 // atomic

	lastDialError atomic.Value

	// queue acts as a counting semaphore: a buffered slot is held while a
	// caller owns a connection obtained via Get.
	queue chan struct{}

	connsMu      sync.Mutex
	conns        []*Conn // every connection created by the pool
	idleConns    []*Conn // subset of conns currently idle
	poolSize     int     // number of pooled (accounted) connections
	idleConnsLen int

	stats Stats

	_closed  uint32        // atomic
	closedCh chan struct{} // closed by Close to stop the reaper
}

var _ Pooler = (*ConnPool)(nil)
|  | ||||
// NewConnPool creates a ConnPool for opt and, when both IdleTimeout and
// IdleCheckFrequency are set, starts the background reaper goroutine.
func NewConnPool(opt *Options) *ConnPool {
	p := &ConnPool{
		opt: opt,

		queue:     make(chan struct{}, opt.PoolSize),
		conns:     make([]*Conn, 0, opt.PoolSize),
		idleConns: make([]*Conn, 0, opt.PoolSize),
		closedCh:  make(chan struct{}),
	}

	// checkMinIdleConns mutates poolSize/idleConnsLen and expects connsMu
	// to be held, even though the pool is not shared yet at this point.
	p.connsMu.Lock()
	p.checkMinIdleConns()
	p.connsMu.Unlock()

	if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
		go p.reaper(opt.IdleCheckFrequency)
	}

	return p
}
|  | ||||
// checkMinIdleConns tops the idle pool up to MinIdleConns (bounded by
// PoolSize). The caller must hold p.connsMu: the counters are reserved
// here, before the dial happens, and rolled back by the dialing
// goroutine if the dial fails.
func (p *ConnPool) checkMinIdleConns() {
	if p.opt.MinIdleConns == 0 {
		return
	}
	for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
		// Reserve the slots up front so concurrent callers do not
		// overshoot while dials are still in flight.
		p.poolSize++
		p.idleConnsLen++
		go func() {
			err := p.addIdleConn()
			if err != nil {
				p.connsMu.Lock()
				p.poolSize--
				p.idleConnsLen--
				p.connsMu.Unlock()
			}
		}()
	}
}
|  | ||||
| func (p *ConnPool) addIdleConn() error { | ||||
| 	cn, err := p.dialConn(context.TODO(), true) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	p.connsMu.Lock() | ||||
| 	p.conns = append(p.conns, cn) | ||||
| 	p.idleConns = append(p.idleConns, cn) | ||||
| 	p.connsMu.Unlock() | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// NewConn returns a new connection that is NOT part of the pool's size
// accounting (pooled=false); the caller owns its lifecycle.
func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
	return p.newConn(ctx, false)
}
|  | ||||
| func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) { | ||||
| 	cn, err := p.dialConn(ctx, pooled) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	p.connsMu.Lock() | ||||
| 	p.conns = append(p.conns, cn) | ||||
| 	if pooled { | ||||
| 		// If pool is full remove the cn on next Put. | ||||
| 		if p.poolSize >= p.opt.PoolSize { | ||||
| 			cn.pooled = false | ||||
| 		} else { | ||||
| 			p.poolSize++ | ||||
| 		} | ||||
| 	} | ||||
| 	p.connsMu.Unlock() | ||||
|  | ||||
| 	return cn, nil | ||||
| } | ||||
|  | ||||
// dialConn establishes a new network connection. After PoolSize
// consecutive dial failures it short-circuits with the last recorded
// error and leaves recovery to a single background prober (tryDial).
func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
	if p.closed() {
		return nil, ErrClosed
	}

	if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
		return nil, p.getLastDialError()
	}

	netConn, err := p.opt.Dialer(ctx)
	if err != nil {
		p.setLastDialError(err)
		// Exactly one caller observes the == transition, so exactly one
		// prober goroutine is started.
		if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
			go p.tryDial()
		}
		return nil, err
	}

	internal.NewConnectionsCounter.Add(ctx, 1)
	cn := NewConn(netConn)
	cn.pooled = pooled
	return cn, nil
}
|  | ||||
// tryDial probes the server once per second until a dial succeeds, then
// clears the consecutive-failure counter so dialConn stops
// short-circuiting. It exits when the pool is closed.
func (p *ConnPool) tryDial() {
	for {
		if p.closed() {
			return
		}

		conn, err := p.opt.Dialer(context.Background())
		if err != nil {
			p.setLastDialError(err)
			time.Sleep(time.Second)
			continue
		}

		atomic.StoreUint32(&p.dialErrorsNum, 0)
		// The probe connection only verifies reachability.
		_ = conn.Close()
		return
	}
}
|  | ||||
// setLastDialError records err for later retrieval by getLastDialError.
// The wrapper keeps the concrete type stored in atomic.Value stable.
func (p *ConnPool) setLastDialError(err error) {
	p.lastDialError.Store(&lastDialErrorWrap{err: err})
}

// getLastDialError returns the most recent dial error, or nil if none
// has been recorded yet.
func (p *ConnPool) getLastDialError() error {
	err, _ := p.lastDialError.Load().(*lastDialErrorWrap)
	if err != nil {
		return err.err
	}
	return nil
}
|  | ||||
// Get returns existed connection from the pool or creates a new one.
//
// It first acquires a turn (semaphore slot); the turn is released by a
// later Put/Remove, or here when creating a new connection fails.
func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
	if p.closed() {
		return nil, ErrClosed
	}

	err := p.waitTurn(ctx)
	if err != nil {
		return nil, err
	}

	for {
		p.connsMu.Lock()
		cn := p.popIdle()
		p.connsMu.Unlock()

		if cn == nil {
			break
		}

		// Discard idle connections past IdleTimeout/MaxConnAge and try
		// the next one; the turn stays held across the loop.
		if p.isStaleConn(cn) {
			_ = p.CloseConn(cn)
			continue
		}

		atomic.AddUint32(&p.stats.Hits, 1)
		return cn, nil
	}

	atomic.AddUint32(&p.stats.Misses, 1)

	newcn, err := p.newConn(ctx, true)
	if err != nil {
		p.freeTurn()
		return nil, err
	}

	return newcn, nil
}
|  | ||||
// getTurn acquires a semaphore slot, blocking without timeout (used by
// the reaper).
func (p *ConnPool) getTurn() {
	p.queue <- struct{}{}
}

// waitTurn acquires a semaphore slot, giving up when ctx is done or
// after Options.PoolTimeout (reported as ErrPoolTimeout).
func (p *ConnPool) waitTurn(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
	}

	// Fast path: a slot is free right now, no timer needed.
	select {
	case p.queue <- struct{}{}:
		return nil
	default:
	}

	timer := timers.Get().(*time.Timer)
	timer.Reset(p.opt.PoolTimeout)

	select {
	case <-ctx.Done():
		// If the timer already fired, drain its channel so the pooled
		// timer is clean for the next Reset.
		if !timer.Stop() {
			<-timer.C
		}
		timers.Put(timer)
		return ctx.Err()
	case p.queue <- struct{}{}:
		if !timer.Stop() {
			<-timer.C
		}
		timers.Put(timer)
		return nil
	case <-timer.C:
		timers.Put(timer)
		atomic.AddUint32(&p.stats.Timeouts, 1)
		return ErrPoolTimeout
	}
}

// freeTurn releases a slot acquired by getTurn/waitTurn.
func (p *ConnPool) freeTurn() {
	<-p.queue
}
|  | ||||
// popIdle removes and returns the most recently returned idle connection
// (LIFO), or nil when none is available. Caller must hold p.connsMu.
func (p *ConnPool) popIdle() *Conn {
	if len(p.idleConns) == 0 {
		return nil
	}

	idx := len(p.idleConns) - 1
	cn := p.idleConns[idx]
	p.idleConns = p.idleConns[:idx]
	p.idleConnsLen--
	// Taking an idle conn may drop us below MinIdleConns; replenish.
	p.checkMinIdleConns()
	return cn
}
|  | ||||
// Put returns a connection obtained via Get to the idle pool and
// releases the caller's turn. Connections with unread buffered data, or
// that are not pool-accounted, are removed instead of reused (Remove
// releases the turn itself).
func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
	if cn.rd.Buffered() > 0 {
		// Unread reply data means the protocol state is unknown; the
		// connection cannot be safely reused.
		internal.Logger.Printf(ctx, "Conn has unread data")
		p.Remove(ctx, cn, BadConnError{})
		return
	}

	if !cn.pooled {
		p.Remove(ctx, cn, nil)
		return
	}

	p.connsMu.Lock()
	p.idleConns = append(p.idleConns, cn)
	p.idleConnsLen++
	p.connsMu.Unlock()
	p.freeTurn()
}
|  | ||||
// Remove discards a connection obtained via Get and releases the
// caller's turn. The reason argument is not used by ConnPool itself.
func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
	p.removeConnWithLock(cn)
	p.freeTurn()
	_ = p.closeConn(cn)
}

// CloseConn removes and closes a connection WITHOUT touching the turn
// semaphore (for connections not currently owned through Get's turn).
func (p *ConnPool) CloseConn(cn *Conn) error {
	p.removeConnWithLock(cn)
	return p.closeConn(cn)
}

// removeConnWithLock is removeConn wrapped with p.connsMu.
func (p *ConnPool) removeConnWithLock(cn *Conn) {
	p.connsMu.Lock()
	p.removeConn(cn)
	p.connsMu.Unlock()
}
|  | ||||
// removeConn deletes cn from p.conns and, for pooled connections,
// decrements poolSize and replenishes idle connections. Caller must hold
// p.connsMu. A connection not found in p.conns is a no-op.
func (p *ConnPool) removeConn(cn *Conn) {
	for i, c := range p.conns {
		if c == cn {
			p.conns = append(p.conns[:i], p.conns[i+1:]...)
			if cn.pooled {
				p.poolSize--
				p.checkMinIdleConns()
			}
			return
		}
	}
}

// closeConn runs the OnClose hook (if any) and closes the connection.
// Hook errors are intentionally ignored.
func (p *ConnPool) closeConn(cn *Conn) error {
	if p.opt.OnClose != nil {
		_ = p.opt.OnClose(cn)
	}
	return cn.Close()
}
|  | ||||
// Len returns total number of connections.
func (p *ConnPool) Len() int {
	p.connsMu.Lock()
	n := len(p.conns)
	p.connsMu.Unlock()
	return n
}

// IdleLen returns number of idle connections.
func (p *ConnPool) IdleLen() int {
	p.connsMu.Lock()
	n := p.idleConnsLen
	p.connsMu.Unlock()
	return n
}

// Stats returns a snapshot of pool statistics. Each counter is read
// atomically, so the snapshot as a whole is not one consistent instant.
func (p *ConnPool) Stats() *Stats {
	idleLen := p.IdleLen()
	return &Stats{
		Hits:     atomic.LoadUint32(&p.stats.Hits),
		Misses:   atomic.LoadUint32(&p.stats.Misses),
		Timeouts: atomic.LoadUint32(&p.stats.Timeouts),

		TotalConns: uint32(p.Len()),
		IdleConns:  uint32(idleLen),
		StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
	}
}

// closed reports whether Close has been called.
func (p *ConnPool) closed() bool {
	return atomic.LoadUint32(&p._closed) == 1
}
|  | ||||
// Filter closes every connection for which fn returns true and reports
// the first close error.
//
// NOTE(review): closed connections are NOT removed from p.conns and
// poolSize is not adjusted here — presumably callers pair this with
// Remove, or rely on subsequent use failing; confirm before reusing this
// for general eviction.
func (p *ConnPool) Filter(fn func(*Conn) bool) error {
	p.connsMu.Lock()
	defer p.connsMu.Unlock()

	var firstErr error
	for _, cn := range p.conns {
		if fn(cn) {
			if err := p.closeConn(cn); err != nil && firstErr == nil {
				firstErr = err
			}
		}
	}
	return firstErr
}
|  | ||||
// Close marks the pool closed, stops the reaper via closedCh, and closes
// every tracked connection, reporting the first close error. Subsequent
// calls return ErrClosed.
func (p *ConnPool) Close() error {
	if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
		return ErrClosed
	}
	close(p.closedCh)

	var firstErr error
	p.connsMu.Lock()
	for _, cn := range p.conns {
		if err := p.closeConn(cn); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	p.conns = nil
	p.poolSize = 0
	p.idleConns = nil
	p.idleConnsLen = 0
	p.connsMu.Unlock()

	return firstErr
}
|  | ||||
// reaper periodically removes stale idle connections until the pool is
// closed. It is started by NewConnPool when idle checking is enabled.
func (p *ConnPool) reaper(frequency time.Duration) {
	ticker := time.NewTicker(frequency)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// It is possible that ticker and closedCh arrive together,
			// and select pseudo-randomly pick ticker case, we double
			// check here to prevent being executed after closed.
			if p.closed() {
				return
			}
			_, err := p.ReapStaleConns()
			if err != nil {
				internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
				continue
			}
		case <-p.closedCh:
			return
		}
	}
}
|  | ||||
// ReapStaleConns closes stale idle connections and returns how many were
// reaped. A turn is taken and released per iteration so the reaper never
// starves Get of semaphore slots.
func (p *ConnPool) ReapStaleConns() (int, error) {
	var n int
	for {
		p.getTurn()

		p.connsMu.Lock()
		cn := p.reapStaleConn()
		p.connsMu.Unlock()

		p.freeTurn()

		if cn != nil {
			_ = p.closeConn(cn)
			n++
		} else {
			break
		}
	}
	atomic.AddUint32(&p.stats.StaleConns, uint32(n))
	return n, nil
}

// reapStaleConn pops the oldest idle connection if it is stale, else
// nil. Caller must hold p.connsMu. idleConns[0] is the oldest because
// Put appends and popIdle takes from the tail.
func (p *ConnPool) reapStaleConn() *Conn {
	if len(p.idleConns) == 0 {
		return nil
	}

	cn := p.idleConns[0]
	if !p.isStaleConn(cn) {
		return nil
	}

	p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
	p.idleConnsLen--
	p.removeConn(cn)

	return cn
}
|  | ||||
| func (p *ConnPool) isStaleConn(cn *Conn) bool { | ||||
| 	if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	now := time.Now() | ||||
| 	if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout { | ||||
| 		return true | ||||
| 	} | ||||
| 	if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge { | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	return false | ||||
| } | ||||
							
								
								
									
										58
									
								
								vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										58
									
								
								vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,58 @@ | ||||
| package pool | ||||
|  | ||||
| import "context" | ||||
|  | ||||
// SingleConnPool is a degenerate pool that always hands out one fixed
// connection owned by a parent pool.
type SingleConnPool struct {
	pool      Pooler
	cn        *Conn
	stickyErr error // set once the conn is removed or the pool is closed
}

var _ Pooler = (*SingleConnPool)(nil)

// NewSingleConnPool wraps cn; the connection remains owned by pool.
func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
	return &SingleConnPool{
		pool: pool,
		cn:   cn,
	}
}
|  | ||||
// NewConn delegates to the parent pool.
func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
	return p.pool.NewConn(ctx)
}

// CloseConn delegates to the parent pool.
func (p *SingleConnPool) CloseConn(cn *Conn) error {
	return p.pool.CloseConn(cn)
}

// Get returns the pinned connection, or the sticky error once the
// connection has been removed or the pool closed.
func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
	if p.stickyErr != nil {
		return nil, p.stickyErr
	}
	return p.cn, nil
}

// Put is a no-op: the pinned connection stays checked out.
func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}

// Remove drops the pinned connection; later Gets fail with reason.
func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
	p.cn = nil
	p.stickyErr = reason
}

// Close detaches the connection without closing it (the parent owns it).
func (p *SingleConnPool) Close() error {
	p.cn = nil
	p.stickyErr = ErrClosed
	return nil
}

// Len always reports 0: the single conn is never pool-accounted here.
func (p *SingleConnPool) Len() int {
	return 0
}

// IdleLen always reports 0: the pinned conn is never idle.
func (p *SingleConnPool) IdleLen() int {
	return 0
}

// Stats are not tracked for single-conn pools.
func (p *SingleConnPool) Stats() *Stats {
	return &Stats{}
}
							
								
								
									
										202
									
								
								vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										202
									
								
								vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,202 @@ | ||||
| package pool | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"sync/atomic" | ||||
| ) | ||||
|  | ||||
// StickyConnPool states: no conn acquired yet, conn acquired (checked
// out or parked in ch), and closed.
const (
	stateDefault = 0
	stateInited  = 1
	stateClosed  = 2
)
|  | ||||
// BadConnError reports that a connection is in an unusable state,
// optionally wrapping the error that caused it.
type BadConnError struct {
	wrapped error
}

var _ error = (*BadConnError)(nil)

// Error implements the error interface, appending the wrapped error's
// message when one is present.
func (e BadConnError) Error() string {
	msg := "redis: Conn is in a bad state"
	if e.wrapped == nil {
		return msg
	}
	return msg + ": " + e.wrapped.Error()
}

// Unwrap exposes the wrapped error to errors.Is / errors.As.
func (e BadConnError) Unwrap() error {
	return e.wrapped
}
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// StickyConnPool owns at most one connection from an underlying pool and
// hands it to one user at a time through a 1-buffered channel. It is
// reference-counted: NewStickyConnPool on an existing StickyConnPool
// increments shared, and Close tears down only at zero.
type StickyConnPool struct {
	pool   Pooler
	shared int32 // atomic

	state uint32     // atomic: stateDefault/stateInited/stateClosed
	ch    chan *Conn // parks the connection between uses

	_badConnError atomic.Value // stores BadConnError
}

var _ Pooler = (*StickyConnPool)(nil)

// NewStickyConnPool returns pool itself (with an increased reference
// count) when it is already a StickyConnPool; otherwise it wraps pool.
func NewStickyConnPool(pool Pooler) *StickyConnPool {
	p, ok := pool.(*StickyConnPool)
	if !ok {
		p = &StickyConnPool{
			pool: pool,
			ch:   make(chan *Conn, 1),
		}
	}
	atomic.AddInt32(&p.shared, 1)
	return p
}
|  | ||||
// NewConn delegates to the underlying pool.
func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
	return p.pool.NewConn(ctx)
}

// CloseConn delegates to the underlying pool.
func (p *StickyConnPool) CloseConn(cn *Conn) error {
	return p.pool.CloseConn(cn)
}
|  | ||||
// Get returns the sticky connection, fetching one from the underlying
// pool on first use. The bounded retry loop covers the rare race with a
// concurrent Close flipping state between the load and the CAS.
func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
	// In worst case this races with Close which is not a very common operation.
	for i := 0; i < 1000; i++ {
		switch atomic.LoadUint32(&p.state) {
		case stateDefault:
			cn, err := p.pool.Get(ctx)
			if err != nil {
				return nil, err
			}
			if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
				return cn, nil
			}
			// Lost the race (e.g. Close ran); give the conn back.
			p.pool.Remove(ctx, cn, ErrClosed)
		case stateInited:
			if err := p.badConnError(); err != nil {
				return nil, err
			}
			// Receive the parked connection; !ok means ch was closed.
			cn, ok := <-p.ch
			if !ok {
				return nil, ErrClosed
			}
			return cn, nil
		case stateClosed:
			return nil, ErrClosed
		default:
			panic("not reached")
		}
	}
	return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
}
|  | ||||
// Put parks the connection for the next Get. If ch was closed by a
// concurrent Close, the send panics; the deferred recover routes the
// connection back to the underlying pool instead.
func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
	defer func() {
		if recover() != nil {
			p.freeConn(ctx, cn)
		}
	}()
	p.ch <- cn
}

// freeConn returns cn to the underlying pool: removed as bad when a
// bad-conn error is recorded, otherwise put back as reusable.
func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
	if err := p.badConnError(); err != nil {
		p.pool.Remove(ctx, cn, err)
	} else {
		p.pool.Put(ctx, cn)
	}
}

// Remove records reason as the sticky bad-conn error and parks the
// connection; if ch was concurrently closed, the recover hands the
// connection back to the underlying pool.
func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
	defer func() {
		if recover() != nil {
			p.pool.Remove(ctx, cn, ErrClosed)
		}
	}()
	p._badConnError.Store(BadConnError{wrapped: reason})
	p.ch <- cn
}
|  | ||||
// Close decrements the reference count and, when it reaches zero, closes
// the channel and releases any parked connection to the underlying pool.
func (p *StickyConnPool) Close() error {
	if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
		return nil
	}

	for i := 0; i < 1000; i++ {
		state := atomic.LoadUint32(&p.state)
		if state == stateClosed {
			return ErrClosed
		}
		if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
			close(p.ch)
			// Drain the parked connection, if any, back to the pool.
			cn, ok := <-p.ch
			if ok {
				p.freeConn(context.TODO(), cn)
			}
			return nil
		}
	}

	return errors.New("redis: StickyConnPool.Close: infinite loop")
}
|  | ||||
// Reset discards a parked bad connection and re-arms the pool so the
// next Get fetches a fresh connection. Without a recorded bad-conn
// error it is a no-op.
func (p *StickyConnPool) Reset(ctx context.Context) error {
	if p.badConnError() == nil {
		return nil
	}

	select {
	case cn, ok := <-p.ch:
		if !ok {
			return ErrClosed
		}
		p.pool.Remove(ctx, cn, ErrClosed)
		// Storing a zero BadConnError clears the sticky error (see
		// badConnError, which treats wrapped==nil as "no error").
		p._badConnError.Store(BadConnError{wrapped: nil})
	default:
		return errors.New("redis: StickyConnPool does not have a Conn")
	}

	if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
		state := atomic.LoadUint32(&p.state)
		return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
	}

	return nil
}
|  | ||||
// badConnError returns the recorded bad-conn error, or nil; a stored
// BadConnError with a nil wrapped error counts as "no error".
func (p *StickyConnPool) badConnError() error {
	if v := p._badConnError.Load(); v != nil {
		err := v.(BadConnError)
		if err.wrapped != nil {
			return err
		}
	}
	return nil
}

// Len reports 1 once a connection has been acquired, else 0.
func (p *StickyConnPool) Len() int {
	switch atomic.LoadUint32(&p.state) {
	case stateDefault:
		return 0
	case stateInited:
		return 1
	case stateClosed:
		return 0
	default:
		panic("not reached")
	}
}

// IdleLen reports whether the connection is currently parked in ch.
func (p *StickyConnPool) IdleLen() int {
	return len(p.ch)
}

// Stats are not tracked for sticky pools.
func (p *StickyConnPool) Stats() *Stats {
	return &Stats{}
}
							
								
								
									
										331
									
								
								vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										331
									
								
								vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,331 @@ | ||||
| package proto | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal/util" | ||||
| ) | ||||
|  | ||||
// RESP reply type markers (first byte of each reply line).
const (
	ErrorReply  = '-'
	StatusReply = '+'
	IntReply    = ':'
	StringReply = '$'
	ArrayReply  = '*'
)

//------------------------------------------------------------------------------

// Nil is returned when Redis replies with the RESP nil bulk string or
// nil array ($-1 / *-1).
const Nil = RedisError("redis: nil")

// RedisError is an error reply sent by the Redis server.
type RedisError string

func (e RedisError) Error() string { return string(e) }

// RedisError marks the type as a server-sent error (interface marker).
func (RedisError) RedisError() {}
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// MultiBulkParse parses a RESP array of the given length from the Reader.
type MultiBulkParse func(*Reader, int64) (interface{}, error)

// Reader reads RESP replies from a stream.
type Reader struct {
	rd   *bufio.Reader
	_buf []byte // reusable scratch buffer for readTmpBytesReply
}

// NewReader wraps rd in a buffered RESP reader.
func NewReader(rd io.Reader) *Reader {
	return &Reader{
		rd:   bufio.NewReader(rd),
		_buf: make([]byte, 64),
	}
}

// Buffered reports how many bytes can be read from the internal buffer.
func (r *Reader) Buffered() int {
	return r.rd.Buffered()
}

// Peek returns the next n bytes without advancing the reader.
func (r *Reader) Peek(n int) ([]byte, error) {
	return r.rd.Peek(n)
}

// Reset switches the reader to read from rd.
func (r *Reader) Reset(rd io.Reader) {
	r.rd.Reset(rd)
}
|  | ||||
// ReadLine reads one reply line (without the trailing \r\n), mapping the
// RESP nil replies ($-1 / *-1) to the Nil error.
func (r *Reader) ReadLine() ([]byte, error) {
	line, err := r.readLine()
	if err != nil {
		return nil, err
	}
	if isNilReply(line) {
		return nil, Nil
	}
	return line, nil
}

// readLine that returns an error if:
//   - there is a pending read error;
//   - or line does not end with \r\n.
func (r *Reader) readLine() ([]byte, error) {
	b, err := r.rd.ReadSlice('\n')
	if err != nil {
		if err != bufio.ErrBufferFull {
			return nil, err
		}

		// Line is longer than the bufio buffer: keep what we have and
		// read the rest (slow path; ReadBytes allocates).
		full := make([]byte, len(b))
		copy(full, b)

		b, err = r.rd.ReadBytes('\n')
		if err != nil {
			return nil, err
		}

		full = append(full, b...)
		b = full
	}
	if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
		return nil, fmt.Errorf("redis: invalid reply: %q", b)
	}
	return b[:len(b)-2], nil
}
|  | ||||
// ReadReply reads one reply and returns it as a Go value: errors become
// error values, statuses strings, integers int64, bulk strings string,
// and arrays are delegated to m (which must be non-nil for arrays).
func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
	line, err := r.ReadLine()
	if err != nil {
		return nil, err
	}

	switch line[0] {
	case ErrorReply:
		return nil, ParseErrorReply(line)
	case StatusReply:
		return string(line[1:]), nil
	case IntReply:
		return util.ParseInt(line[1:], 10, 64)
	case StringReply:
		return r.readStringReply(line)
	case ArrayReply:
		n, err := parseArrayLen(line)
		if err != nil {
			return nil, err
		}
		if m == nil {
			err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
			return nil, err
		}
		return m(r, n)
	}
	return nil, fmt.Errorf("redis: can't parse %.100q", line)
}
|  | ||||
// ReadIntReply reads an integer reply (:N) as an int64.
func (r *Reader) ReadIntReply() (int64, error) {
	line, err := r.ReadLine()
	if err != nil {
		return 0, err
	}
	switch line[0] {
	case ErrorReply:
		return 0, ParseErrorReply(line)
	case IntReply:
		return util.ParseInt(line[1:], 10, 64)
	default:
		return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
	}
}

// ReadString reads any string-representable reply: bulk string, status,
// or integer (returned as its decimal text).
func (r *Reader) ReadString() (string, error) {
	line, err := r.ReadLine()
	if err != nil {
		return "", err
	}
	switch line[0] {
	case ErrorReply:
		return "", ParseErrorReply(line)
	case StringReply:
		return r.readStringReply(line)
	case StatusReply:
		return string(line[1:]), nil
	case IntReply:
		return string(line[1:]), nil
	default:
		return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
	}
}
|  | ||||
// readStringReply reads the payload of a bulk string whose header line
// is given; the +2 accounts for the trailing \r\n, which is read but
// excluded from the result.
func (r *Reader) readStringReply(line []byte) (string, error) {
	if isNilReply(line) {
		return "", Nil
	}

	replyLen, err := util.Atoi(line[1:])
	if err != nil {
		return "", err
	}

	b := make([]byte, replyLen+2)
	_, err = io.ReadFull(r.rd, b)
	if err != nil {
		return "", err
	}

	return util.BytesToString(b[:replyLen]), nil
}
|  | ||||
// ReadArrayReply reads an array reply and delegates its n elements to m.
func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
	line, err := r.ReadLine()
	if err != nil {
		return nil, err
	}
	switch line[0] {
	case ErrorReply:
		return nil, ParseErrorReply(line)
	case ArrayReply:
		n, err := parseArrayLen(line)
		if err != nil {
			return nil, err
		}
		return m(r, n)
	default:
		return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
	}
}

// ReadArrayLen reads an array header and returns its element count.
func (r *Reader) ReadArrayLen() (int, error) {
	line, err := r.ReadLine()
	if err != nil {
		return 0, err
	}
	switch line[0] {
	case ErrorReply:
		return 0, ParseErrorReply(line)
	case ArrayReply:
		n, err := parseArrayLen(line)
		if err != nil {
			return 0, err
		}
		return int(n), nil
	default:
		return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
	}
}
|  | ||||
| func (r *Reader) ReadScanReply() ([]string, uint64, error) { | ||||
| 	n, err := r.ReadArrayLen() | ||||
| 	if err != nil { | ||||
| 		return nil, 0, err | ||||
| 	} | ||||
| 	if n != 2 { | ||||
| 		return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n) | ||||
| 	} | ||||
|  | ||||
| 	cursor, err := r.ReadUint() | ||||
| 	if err != nil { | ||||
| 		return nil, 0, err | ||||
| 	} | ||||
|  | ||||
| 	n, err = r.ReadArrayLen() | ||||
| 	if err != nil { | ||||
| 		return nil, 0, err | ||||
| 	} | ||||
|  | ||||
| 	keys := make([]string, n) | ||||
|  | ||||
| 	for i := 0; i < n; i++ { | ||||
| 		key, err := r.ReadString() | ||||
| 		if err != nil { | ||||
| 			return nil, 0, err | ||||
| 		} | ||||
| 		keys[i] = key | ||||
| 	} | ||||
|  | ||||
| 	return keys, cursor, err | ||||
| } | ||||
|  | ||||
// ReadInt reads the next reply's payload and parses it as an int64.
func (r *Reader) ReadInt() (int64, error) {
	b, err := r.readTmpBytesReply()
	if err != nil {
		return 0, err
	}
	return util.ParseInt(b, 10, 64)
}

// ReadUint reads the next reply's payload and parses it as a uint64.
func (r *Reader) ReadUint() (uint64, error) {
	b, err := r.readTmpBytesReply()
	if err != nil {
		return 0, err
	}
	return util.ParseUint(b, 10, 64)
}

// ReadFloatReply reads the next reply's payload and parses it as a float64.
func (r *Reader) ReadFloatReply() (float64, error) {
	b, err := r.readTmpBytesReply()
	if err != nil {
		return 0, err
	}
	return util.ParseFloat(b, 64)
}

// readTmpBytesReply reads a bulk-string or status reply into the shared
// scratch buffer; the returned slice is only valid until the next read.
func (r *Reader) readTmpBytesReply() ([]byte, error) {
	line, err := r.ReadLine()
	if err != nil {
		return nil, err
	}
	switch line[0] {
	case ErrorReply:
		return nil, ParseErrorReply(line)
	case StringReply:
		return r._readTmpBytesReply(line)
	case StatusReply:
		return line[1:], nil
	default:
		return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
	}
}
|  | ||||
// _readTmpBytesReply reads a bulk string payload (plus trailing \r\n)
// into the shared scratch buffer; the result is only valid until the
// next read call.
func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
	if isNilReply(line) {
		return nil, Nil
	}

	replyLen, err := util.Atoi(line[1:])
	if err != nil {
		return nil, err
	}

	buf := r.buf(replyLen + 2)
	_, err = io.ReadFull(r.rd, buf)
	if err != nil {
		return nil, err
	}

	return buf[:replyLen], nil
}
|  | ||||
| func (r *Reader) buf(n int) []byte { | ||||
| 	if n <= cap(r._buf) { | ||||
| 		return r._buf[:n] | ||||
| 	} | ||||
| 	d := n - cap(r._buf) | ||||
| 	r._buf = append(r._buf, make([]byte, d)...) | ||||
| 	return r._buf | ||||
| } | ||||
|  | ||||
| func isNilReply(b []byte) bool { | ||||
| 	return len(b) == 3 && | ||||
| 		(b[0] == StringReply || b[0] == ArrayReply) && | ||||
| 		b[1] == '-' && b[2] == '1' | ||||
| } | ||||
|  | ||||
| func ParseErrorReply(line []byte) error { | ||||
| 	return RedisError(string(line[1:])) | ||||
| } | ||||
|  | ||||
| func parseArrayLen(line []byte) (int64, error) { | ||||
| 	if isNilReply(line) { | ||||
| 		return 0, Nil | ||||
| 	} | ||||
| 	return util.ParseInt(line[1:], 10, 64) | ||||
| } | ||||
							
								
								
									
										173
									
								
								vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										173
									
								
								vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,173 @@ | ||||
| package proto | ||||
|  | ||||
| import ( | ||||
| 	"encoding" | ||||
| 	"fmt" | ||||
| 	"reflect" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal/util" | ||||
| ) | ||||
|  | ||||
| // Scan parses bytes `b` to `v` with appropriate type. | ||||
| // nolint: gocyclo | ||||
| func Scan(b []byte, v interface{}) error { | ||||
| 	switch v := v.(type) { | ||||
| 	case nil: | ||||
| 		return fmt.Errorf("redis: Scan(nil)") | ||||
| 	case *string: | ||||
| 		*v = util.BytesToString(b) | ||||
| 		return nil | ||||
| 	case *[]byte: | ||||
| 		*v = b | ||||
| 		return nil | ||||
| 	case *int: | ||||
| 		var err error | ||||
| 		*v, err = util.Atoi(b) | ||||
| 		return err | ||||
| 	case *int8: | ||||
| 		n, err := util.ParseInt(b, 10, 8) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		*v = int8(n) | ||||
| 		return nil | ||||
| 	case *int16: | ||||
| 		n, err := util.ParseInt(b, 10, 16) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		*v = int16(n) | ||||
| 		return nil | ||||
| 	case *int32: | ||||
| 		n, err := util.ParseInt(b, 10, 32) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		*v = int32(n) | ||||
| 		return nil | ||||
| 	case *int64: | ||||
| 		n, err := util.ParseInt(b, 10, 64) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		*v = n | ||||
| 		return nil | ||||
| 	case *uint: | ||||
| 		n, err := util.ParseUint(b, 10, 64) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		*v = uint(n) | ||||
| 		return nil | ||||
| 	case *uint8: | ||||
| 		n, err := util.ParseUint(b, 10, 8) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		*v = uint8(n) | ||||
| 		return nil | ||||
| 	case *uint16: | ||||
| 		n, err := util.ParseUint(b, 10, 16) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		*v = uint16(n) | ||||
| 		return nil | ||||
| 	case *uint32: | ||||
| 		n, err := util.ParseUint(b, 10, 32) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		*v = uint32(n) | ||||
| 		return nil | ||||
| 	case *uint64: | ||||
| 		n, err := util.ParseUint(b, 10, 64) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		*v = n | ||||
| 		return nil | ||||
| 	case *float32: | ||||
| 		n, err := util.ParseFloat(b, 32) | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		*v = float32(n) | ||||
| 		return err | ||||
| 	case *float64: | ||||
| 		var err error | ||||
| 		*v, err = util.ParseFloat(b, 64) | ||||
| 		return err | ||||
| 	case *bool: | ||||
| 		*v = len(b) == 1 && b[0] == '1' | ||||
| 		return nil | ||||
| 	case *time.Time: | ||||
| 		var err error | ||||
| 		*v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b)) | ||||
| 		return err | ||||
| 	case encoding.BinaryUnmarshaler: | ||||
| 		return v.UnmarshalBinary(b) | ||||
| 	default: | ||||
| 		return fmt.Errorf( | ||||
| 			"redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func ScanSlice(data []string, slice interface{}) error { | ||||
| 	v := reflect.ValueOf(slice) | ||||
| 	if !v.IsValid() { | ||||
| 		return fmt.Errorf("redis: ScanSlice(nil)") | ||||
| 	} | ||||
| 	if v.Kind() != reflect.Ptr { | ||||
| 		return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice) | ||||
| 	} | ||||
| 	v = v.Elem() | ||||
| 	if v.Kind() != reflect.Slice { | ||||
| 		return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice) | ||||
| 	} | ||||
|  | ||||
| 	next := makeSliceNextElemFunc(v) | ||||
| 	for i, s := range data { | ||||
| 		elem := next() | ||||
| 		if err := Scan([]byte(s), elem.Addr().Interface()); err != nil { | ||||
| 			err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err) | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// makeSliceNextElemFunc returns a function that yields the next
// addressable element of slice v on each call. Elements already present
// within v's capacity are reused (the slice is re-sliced one longer);
// otherwise a fresh element is appended.
func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
	elemType := v.Type().Elem()

	if elemType.Kind() == reflect.Ptr {
		// Pointer-element slices: hand out the pointed-to value so the
		// caller can scan into it.
		elemType = elemType.Elem()
		return func() reflect.Value {
			if v.Len() < v.Cap() {
				// Re-slice within capacity to expose the existing slot.
				v.Set(v.Slice(0, v.Len()+1))
				elem := v.Index(v.Len() - 1)
				if elem.IsNil() {
					// Existing slot holds a nil pointer; allocate a value
					// for it before handing it out.
					elem.Set(reflect.New(elemType))
				}
				return elem.Elem()
			}

			// No spare capacity: append a freshly allocated element.
			elem := reflect.New(elemType)
			v.Set(reflect.Append(v, elem))
			return elem.Elem()
		}
	}

	zero := reflect.Zero(elemType)
	return func() reflect.Value {
		if v.Len() < v.Cap() {
			// Reuse the element already present within capacity.
			v.Set(v.Slice(0, v.Len()+1))
			return v.Index(v.Len() - 1)
		}

		v.Set(reflect.Append(v, zero))
		return v.Index(v.Len() - 1)
	}
}
							
								
								
									
										153
									
								
								vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										153
									
								
								vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,153 @@ | ||||
| package proto | ||||
|  | ||||
| import ( | ||||
| 	"encoding" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"strconv" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal/util" | ||||
| ) | ||||
|  | ||||
// writer is the minimal byte sink the RESP Writer needs: stream, byte,
// and string writes.
type writer interface {
	io.Writer
	io.ByteWriter
	// io.StringWriter
	WriteString(s string) (n int, err error)
}
|  | ||||
// Writer serializes commands into the RESP wire format on top of an
// embedded writer.
type Writer struct {
	writer

	lenBuf []byte // scratch buffer for formatting lengths
	numBuf []byte // scratch buffer for formatting numbers and times
}
|  | ||||
| func NewWriter(wr writer) *Writer { | ||||
| 	return &Writer{ | ||||
| 		writer: wr, | ||||
|  | ||||
| 		lenBuf: make([]byte, 64), | ||||
| 		numBuf: make([]byte, 64), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (w *Writer) WriteArgs(args []interface{}) error { | ||||
| 	if err := w.WriteByte(ArrayReply); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if err := w.writeLen(len(args)); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	for _, arg := range args { | ||||
| 		if err := w.WriteArg(arg); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (w *Writer) writeLen(n int) error { | ||||
| 	w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10) | ||||
| 	w.lenBuf = append(w.lenBuf, '\r', '\n') | ||||
| 	_, err := w.Write(w.lenBuf) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
// WriteArg serializes a single command argument as a RESP bulk string.
// nil becomes an empty string; numbers and bools are written as decimal
// text; time.Time uses RFC3339Nano. Anything else must implement
// encoding.BinaryMarshaler.
//
// NOTE: keep the time.Time case before encoding.BinaryMarshaler —
// time.Time has a MarshalBinary method, so case order decides which
// encoding is used.
func (w *Writer) WriteArg(v interface{}) error {
	switch v := v.(type) {
	case nil:
		return w.string("")
	case string:
		return w.string(v)
	case []byte:
		return w.bytes(v)
	case int:
		return w.int(int64(v))
	case int8:
		return w.int(int64(v))
	case int16:
		return w.int(int64(v))
	case int32:
		return w.int(int64(v))
	case int64:
		return w.int(v)
	case uint:
		return w.uint(uint64(v))
	case uint8:
		return w.uint(uint64(v))
	case uint16:
		return w.uint(uint64(v))
	case uint32:
		return w.uint(uint64(v))
	case uint64:
		return w.uint(v)
	case float32:
		return w.float(float64(v))
	case float64:
		return w.float(v)
	case bool:
		// Booleans are sent as "1"/"0".
		if v {
			return w.int(1)
		}
		return w.int(0)
	case time.Time:
		// Format into the reusable buffer to avoid an allocation.
		w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
		return w.bytes(w.numBuf)
	case encoding.BinaryMarshaler:
		b, err := v.MarshalBinary()
		if err != nil {
			return err
		}
		return w.bytes(b)
	default:
		return fmt.Errorf(
			"redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
	}
}
|  | ||||
| func (w *Writer) bytes(b []byte) error { | ||||
| 	if err := w.WriteByte(StringReply); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if err := w.writeLen(len(b)); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if _, err := w.Write(b); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	return w.crlf() | ||||
| } | ||||
|  | ||||
| func (w *Writer) string(s string) error { | ||||
| 	return w.bytes(util.StringToBytes(s)) | ||||
| } | ||||
|  | ||||
| func (w *Writer) uint(n uint64) error { | ||||
| 	w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10) | ||||
| 	return w.bytes(w.numBuf) | ||||
| } | ||||
|  | ||||
| func (w *Writer) int(n int64) error { | ||||
| 	w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10) | ||||
| 	return w.bytes(w.numBuf) | ||||
| } | ||||
|  | ||||
| func (w *Writer) float(f float64) error { | ||||
| 	w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64) | ||||
| 	return w.bytes(w.numBuf) | ||||
| } | ||||
|  | ||||
| func (w *Writer) crlf() error { | ||||
| 	if err := w.WriteByte('\r'); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	return w.WriteByte('\n') | ||||
| } | ||||
							
								
								
									
										45
									
								
								vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										45
									
								
								vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,45 @@ | ||||
| package rand | ||||
|  | ||||
| import ( | ||||
| 	"math/rand" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
| // Int returns a non-negative pseudo-random int. | ||||
| func Int() int { return pseudo.Int() } | ||||
|  | ||||
| // Intn returns, as an int, a non-negative pseudo-random number in [0,n). | ||||
| // It panics if n <= 0. | ||||
| func Intn(n int) int { return pseudo.Intn(n) } | ||||
|  | ||||
| // Int63n returns, as an int64, a non-negative pseudo-random number in [0,n). | ||||
| // It panics if n <= 0. | ||||
| func Int63n(n int64) int64 { return pseudo.Int63n(n) } | ||||
|  | ||||
| // Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n). | ||||
| func Perm(n int) []int { return pseudo.Perm(n) } | ||||
|  | ||||
| // Seed uses the provided seed value to initialize the default Source to a | ||||
| // deterministic state. If Seed is not called, the generator behaves as if | ||||
| // seeded by Seed(1). | ||||
| func Seed(n int64) { pseudo.Seed(n) } | ||||
|  | ||||
// pseudo is the package-wide generator; the mutex-guarded source makes
// it safe for concurrent use. It is deterministically seeded with 1
// until Seed is called.
var pseudo = rand.New(&source{src: rand.NewSource(1)})
|  | ||||
// source wraps a math/rand Source with a mutex so the package-level
// generator can be used from multiple goroutines.
type source struct {
	src rand.Source
	mu  sync.Mutex // guards src
}
|  | ||||
| func (s *source) Int63() int64 { | ||||
| 	s.mu.Lock() | ||||
| 	n := s.src.Int63() | ||||
| 	s.mu.Unlock() | ||||
| 	return n | ||||
| } | ||||
|  | ||||
| func (s *source) Seed(seed int64) { | ||||
| 	s.mu.Lock() | ||||
| 	s.src.Seed(seed) | ||||
| 	s.mu.Unlock() | ||||
| } | ||||
							
								
								
									
										11
									
								
								vendor/github.com/go-redis/redis/v8/internal/safe.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										11
									
								
								vendor/github.com/go-redis/redis/v8/internal/safe.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,11 @@ | ||||
| // +build appengine | ||||
|  | ||||
| package internal | ||||
|  | ||||
// String converts byte slice to string. This appengine-safe variant
// copies the data.
func String(b []byte) string {
	return string(b)
}
|  | ||||
// Bytes converts string to byte slice. This appengine-safe variant
// copies the data.
func Bytes(s string) []byte {
	return []byte(s)
}
							
								
								
									
										20
									
								
								vendor/github.com/go-redis/redis/v8/internal/unsafe.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								vendor/github.com/go-redis/redis/v8/internal/unsafe.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,20 @@ | ||||
| // +build !appengine | ||||
|  | ||||
| package internal | ||||
|  | ||||
| import "unsafe" | ||||
|  | ||||
// String converts byte slice to string without copying: the result
// aliases b's memory, so the caller must not mutate b afterwards.
func String(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}
|  | ||||
// Bytes converts string to byte slice without copying by fabricating a
// slice header (the embedded string supplies Data/Len; Cap is set to
// len(s)). The result aliases the string's memory — mutating it is
// undefined behavior.
func Bytes(s string) []byte {
	return *(*[]byte)(unsafe.Pointer(
		&struct {
			string
			Cap int
		}{s, len(s)},
	))
}
							
								
								
									
										73
									
								
								vendor/github.com/go-redis/redis/v8/internal/util.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										73
									
								
								vendor/github.com/go-redis/redis/v8/internal/util.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,73 @@ | ||||
| package internal | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal/proto" | ||||
| 	"github.com/go-redis/redis/v8/internal/util" | ||||
| 	"go.opentelemetry.io/otel" | ||||
| 	"go.opentelemetry.io/otel/trace" | ||||
| ) | ||||
|  | ||||
| func Sleep(ctx context.Context, dur time.Duration) error { | ||||
| 	return WithSpan(ctx, "time.Sleep", func(ctx context.Context, span trace.Span) error { | ||||
| 		t := time.NewTimer(dur) | ||||
| 		defer t.Stop() | ||||
|  | ||||
| 		select { | ||||
| 		case <-t.C: | ||||
| 			return nil | ||||
| 		case <-ctx.Done(): | ||||
| 			return ctx.Err() | ||||
| 		} | ||||
| 	}) | ||||
| } | ||||
|  | ||||
| func ToLower(s string) string { | ||||
| 	if isLower(s) { | ||||
| 		return s | ||||
| 	} | ||||
|  | ||||
| 	b := make([]byte, len(s)) | ||||
| 	for i := range b { | ||||
| 		c := s[i] | ||||
| 		if c >= 'A' && c <= 'Z' { | ||||
| 			c += 'a' - 'A' | ||||
| 		} | ||||
| 		b[i] = c | ||||
| 	} | ||||
| 	return util.BytesToString(b) | ||||
| } | ||||
|  | ||||
// isLower reports whether s contains no ASCII upper-case letters.
func isLower(s string) bool {
	for _, c := range []byte(s) {
		if 'A' <= c && c <= 'Z' {
			return false
		}
	}
	return true
}
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// tracer is the package-wide OpenTelemetry tracer used by WithSpan.
var tracer = otel.Tracer("github.com/go-redis/redis")
|  | ||||
// WithSpan runs fn inside a new tracing span named name. When the span
// already on ctx is not recording, fn is invoked directly with that
// span, skipping the cost of starting a new one.
func WithSpan(ctx context.Context, name string, fn func(context.Context, trace.Span) error) error {
	if span := trace.SpanFromContext(ctx); !span.IsRecording() {
		return fn(ctx, span)
	}

	ctx, span := tracer.Start(ctx, name)
	defer span.End()

	return fn(ctx, span)
}
|  | ||||
| func RecordError(ctx context.Context, span trace.Span, err error) error { | ||||
| 	if err != proto.Nil { | ||||
| 		span.RecordError(err) | ||||
| 	} | ||||
| 	return err | ||||
| } | ||||
							
								
								
									
										11
									
								
								vendor/github.com/go-redis/redis/v8/internal/util/safe.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										11
									
								
								vendor/github.com/go-redis/redis/v8/internal/util/safe.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,11 @@ | ||||
| // +build appengine | ||||
|  | ||||
| package util | ||||
|  | ||||
// BytesToString converts byte slice to string. This appengine-safe
// variant copies the data.
func BytesToString(b []byte) string {
	return string(b)
}
|  | ||||
// StringToBytes converts string to byte slice. This appengine-safe
// variant copies the data.
func StringToBytes(s string) []byte {
	return []byte(s)
}
							
								
								
									
										19
									
								
								vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										19
									
								
								vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,19 @@ | ||||
| package util | ||||
|  | ||||
| import "strconv" | ||||
|  | ||||
// Atoi is strconv.Atoi for a byte slice, converting via BytesToString.
func Atoi(b []byte) (int, error) {
	return strconv.Atoi(BytesToString(b))
}
|  | ||||
// ParseInt is strconv.ParseInt for a byte slice, converting via
// BytesToString.
func ParseInt(b []byte, base int, bitSize int) (int64, error) {
	return strconv.ParseInt(BytesToString(b), base, bitSize)
}
|  | ||||
// ParseUint is strconv.ParseUint for a byte slice, converting via
// BytesToString.
func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
	return strconv.ParseUint(BytesToString(b), base, bitSize)
}
|  | ||||
// ParseFloat is strconv.ParseFloat for a byte slice, converting via
// BytesToString.
func ParseFloat(b []byte, bitSize int) (float64, error) {
	return strconv.ParseFloat(BytesToString(b), bitSize)
}
							
								
								
									
										22
									
								
								vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										22
									
								
								vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,22 @@ | ||||
| // +build !appengine | ||||
|  | ||||
| package util | ||||
|  | ||||
| import ( | ||||
| 	"unsafe" | ||||
| ) | ||||
|  | ||||
// BytesToString converts byte slice to string without copying: the
// result aliases b's memory, so the caller must not mutate b afterwards.
func BytesToString(b []byte) string {
	return *(*string)(unsafe.Pointer(&b))
}
|  | ||||
// StringToBytes converts string to byte slice without copying by
// fabricating a slice header (the embedded string supplies Data/Len;
// Cap is set to len(s)). The result aliases the string's memory —
// mutating it is undefined behavior.
func StringToBytes(s string) []byte {
	return *(*[]byte)(unsafe.Pointer(
		&struct {
			string
			Cap int
		}{s, len(s)},
	))
}
							
								
								
									
										77
									
								
								vendor/github.com/go-redis/redis/v8/iterator.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										77
									
								
								vendor/github.com/go-redis/redis/v8/iterator.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,77 @@ | ||||
| package redis | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
// ScanIterator is used to incrementally iterate over a collection of elements.
// It's safe for concurrent use by multiple goroutines.
type ScanIterator struct {
	mu  sync.Mutex // protects cmd and pos
	cmd *ScanCmd
	pos int // 1-based index into cmd.page; 0 before the first Next
}
|  | ||||
| // Err returns the last iterator error, if any. | ||||
| func (it *ScanIterator) Err() error { | ||||
| 	it.mu.Lock() | ||||
| 	err := it.cmd.Err() | ||||
| 	it.mu.Unlock() | ||||
| 	return err | ||||
| } | ||||
|  | ||||
// Next advances the cursor and returns true if more values can be read.
// When the current page is exhausted it transparently issues further
// scan commands, stopping once the server reports cursor 0.
func (it *ScanIterator) Next(ctx context.Context) bool {
	it.mu.Lock()
	defer it.mu.Unlock()

	// Instantly return on errors.
	if it.cmd.Err() != nil {
		return false
	}

	// Advance cursor, check if we are still within range.
	if it.pos < len(it.cmd.page) {
		it.pos++
		return true
	}

	for {
		// Return if there is no more data to fetch.
		if it.cmd.cursor == 0 {
			return false
		}

		// Fetch next page by patching the cursor argument in place.
		// "scan"/"qscan" carry the cursor at args[1]; other scan-family
		// commands (presumably key-scoped ones like hscan — args[1] is
		// the key) carry it at args[2].
		switch it.cmd.args[0] {
		case "scan", "qscan":
			it.cmd.args[1] = it.cmd.cursor
		default:
			it.cmd.args[2] = it.cmd.cursor
		}

		err := it.cmd.process(ctx, it.cmd)
		if err != nil {
			return false
		}

		it.pos = 1

		// Redis can occasionally return empty page.
		if len(it.cmd.page) > 0 {
			return true
		}
	}
}
|  | ||||
| // Val returns the key/field at the current cursor position. | ||||
| func (it *ScanIterator) Val() string { | ||||
| 	var v string | ||||
| 	it.mu.Lock() | ||||
| 	if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) { | ||||
| 		v = it.cmd.page[it.pos-1] | ||||
| 	} | ||||
| 	it.mu.Unlock() | ||||
| 	return v | ||||
| } | ||||
							
								
								
									
										317
									
								
								vendor/github.com/go-redis/redis/v8/options.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										317
									
								
								vendor/github.com/go-redis/redis/v8/options.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,317 @@ | ||||
| package redis | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"crypto/tls" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"net" | ||||
| 	"net/url" | ||||
| 	"runtime" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal" | ||||
| 	"github.com/go-redis/redis/v8/internal/pool" | ||||
| 	"go.opentelemetry.io/otel/label" | ||||
| 	"go.opentelemetry.io/otel/trace" | ||||
| ) | ||||
|  | ||||
// Limiter is the interface of a rate limiter or a circuit breaker.
type Limiter interface {
	// Allow returns nil if the operation is allowed, or an error
	// otherwise. If the operation is allowed, the client must call
	// ReportResult with the operation's outcome, success or failure.
	Allow() error
	// ReportResult reports the result of the previously allowed
	// operation. nil indicates a success, a non-nil error usually
	// indicates a failure.
	ReportResult(result error)
}
|  | ||||
// Options keeps the settings to setup redis connection.
// Zero values select the defaults documented per field (applied by
// init); the sentinel -1 disables the corresponding feature.
type Options struct {
	// The network type, either tcp or unix.
	// Default is tcp.
	Network string
	// host:port address.
	Addr string

	// Dialer creates new network connection and has priority over
	// Network and Addr options.
	Dialer func(ctx context.Context, network, addr string) (net.Conn, error)

	// Hook that is called when new connection is established.
	OnConnect func(ctx context.Context, cn *Conn) error

	// Use the specified Username to authenticate the current connection
	// with one of the connections defined in the ACL list when connecting
	// to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
	Username string
	// Optional password. Must match the password specified in the
	// requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
	// or the User Password when connecting to a Redis 6.0 instance, or greater,
	// that is using the Redis ACL system.
	Password string

	// Database to be selected after connecting to the server.
	DB int

	// Maximum number of retries before giving up.
	// Default is 3 retries; -1 (not 0) disables retries.
	MaxRetries int
	// Minimum backoff between each retry.
	// Default is 8 milliseconds; -1 disables backoff.
	MinRetryBackoff time.Duration
	// Maximum backoff between each retry.
	// Default is 512 milliseconds; -1 disables backoff.
	MaxRetryBackoff time.Duration

	// Dial timeout for establishing new connections.
	// Default is 5 seconds.
	DialTimeout time.Duration
	// Timeout for socket reads. If reached, commands will fail
	// with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
	// Default is 3 seconds.
	ReadTimeout time.Duration
	// Timeout for socket writes. If reached, commands will fail
	// with a timeout instead of blocking.
	// Default is ReadTimeout.
	WriteTimeout time.Duration

	// Maximum number of socket connections.
	// Default is 10 connections per every CPU as reported by runtime.NumCPU.
	PoolSize int
	// Minimum number of idle connections which is useful when establishing
	// new connection is slow.
	MinIdleConns int
	// Connection age at which client retires (closes) the connection.
	// Default is to not close aged connections.
	MaxConnAge time.Duration
	// Amount of time client waits for connection if all connections
	// are busy before returning an error.
	// Default is ReadTimeout + 1 second.
	PoolTimeout time.Duration
	// Amount of time after which client closes idle connections.
	// Should be less than server's timeout.
	// Default is 5 minutes. -1 disables idle timeout check.
	IdleTimeout time.Duration
	// Frequency of idle checks made by idle connections reaper.
	// Default is 1 minute. -1 disables idle connections reaper,
	// but idle connections are still discarded by the client
	// if IdleTimeout is set.
	IdleCheckFrequency time.Duration

	// Enables read only queries on slave nodes.
	// Unexported: not set directly by users of this struct.
	readOnly bool

	// TLS Config to use. When set TLS will be negotiated.
	TLSConfig *tls.Config

	// Limiter interface used to implement circuit breaker or rate limiter.
	Limiter Limiter
}
|  | ||||
// init fills in defaults for zero-valued settings and normalizes the
// -1 "disabled" sentinels to 0. Ordering matters: ReadTimeout is
// normalized before WriteTimeout and PoolTimeout, whose defaults are
// derived from it.
func (opt *Options) init() {
	if opt.Addr == "" {
		opt.Addr = "localhost:6379"
	}
	if opt.Network == "" {
		// A leading "/" in Addr indicates a unix socket path.
		if strings.HasPrefix(opt.Addr, "/") {
			opt.Network = "unix"
		} else {
			opt.Network = "tcp"
		}
	}
	if opt.DialTimeout == 0 {
		opt.DialTimeout = 5 * time.Second
	}
	if opt.Dialer == nil {
		// Default dialer honors DialTimeout and negotiates TLS when
		// TLSConfig is set.
		opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
			netDialer := &net.Dialer{
				Timeout:   opt.DialTimeout,
				KeepAlive: 5 * time.Minute,
			}
			if opt.TLSConfig == nil {
				return netDialer.DialContext(ctx, network, addr)
			}
			return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
		}
	}
	if opt.PoolSize == 0 {
		opt.PoolSize = 10 * runtime.NumCPU()
	}
	// -1 disables the timeout; 0 selects the default.
	switch opt.ReadTimeout {
	case -1:
		opt.ReadTimeout = 0
	case 0:
		opt.ReadTimeout = 3 * time.Second
	}
	switch opt.WriteTimeout {
	case -1:
		opt.WriteTimeout = 0
	case 0:
		opt.WriteTimeout = opt.ReadTimeout
	}
	if opt.PoolTimeout == 0 {
		opt.PoolTimeout = opt.ReadTimeout + time.Second
	}
	if opt.IdleTimeout == 0 {
		opt.IdleTimeout = 5 * time.Minute
	}
	if opt.IdleCheckFrequency == 0 {
		opt.IdleCheckFrequency = time.Minute
	}

	// -1 disables retries; 0 selects the default of 3.
	if opt.MaxRetries == -1 {
		opt.MaxRetries = 0
	} else if opt.MaxRetries == 0 {
		opt.MaxRetries = 3
	}
	switch opt.MinRetryBackoff {
	case -1:
		opt.MinRetryBackoff = 0
	case 0:
		opt.MinRetryBackoff = 8 * time.Millisecond
	}
	switch opt.MaxRetryBackoff {
	case -1:
		opt.MaxRetryBackoff = 0
	case 0:
		opt.MaxRetryBackoff = 512 * time.Millisecond
	}
}
|  | ||||
| func (opt *Options) clone() *Options { | ||||
| 	clone := *opt | ||||
| 	return &clone | ||||
| } | ||||
|  | ||||
// ParseURL parses a URL into Options that can be used to connect to Redis.
// Scheme is required.
// There are two connection types: by tcp socket and by unix socket.
// Tcp connection:
// 		redis://<user>:<password>@<host>:<port>/<db_number>
// Unix connection:
//		unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
func ParseURL(redisURL string) (*Options, error) {
	u, err := url.Parse(redisURL)
	if err != nil {
		return nil, err
	}

	// "rediss" is the TLS variant; setupTCPConn enables TLS for it.
	switch u.Scheme {
	case "redis", "rediss":
		return setupTCPConn(u)
	case "unix":
		return setupUnixConn(u)
	default:
		return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
	}
}
|  | ||||
| func setupTCPConn(u *url.URL) (*Options, error) { | ||||
| 	o := &Options{Network: "tcp"} | ||||
|  | ||||
| 	o.Username, o.Password = getUserPassword(u) | ||||
|  | ||||
| 	if len(u.Query()) > 0 { | ||||
| 		return nil, errors.New("redis: no options supported") | ||||
| 	} | ||||
|  | ||||
| 	h, p, err := net.SplitHostPort(u.Host) | ||||
| 	if err != nil { | ||||
| 		h = u.Host | ||||
| 	} | ||||
| 	if h == "" { | ||||
| 		h = "localhost" | ||||
| 	} | ||||
| 	if p == "" { | ||||
| 		p = "6379" | ||||
| 	} | ||||
| 	o.Addr = net.JoinHostPort(h, p) | ||||
|  | ||||
| 	f := strings.FieldsFunc(u.Path, func(r rune) bool { | ||||
| 		return r == '/' | ||||
| 	}) | ||||
| 	switch len(f) { | ||||
| 	case 0: | ||||
| 		o.DB = 0 | ||||
| 	case 1: | ||||
| 		if o.DB, err = strconv.Atoi(f[0]); err != nil { | ||||
| 			return nil, fmt.Errorf("redis: invalid database number: %q", f[0]) | ||||
| 		} | ||||
| 	default: | ||||
| 		return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path) | ||||
| 	} | ||||
|  | ||||
| 	if u.Scheme == "rediss" { | ||||
| 		o.TLSConfig = &tls.Config{ServerName: h} | ||||
| 	} | ||||
|  | ||||
| 	return o, nil | ||||
| } | ||||
|  | ||||
| func setupUnixConn(u *url.URL) (*Options, error) { | ||||
| 	o := &Options{ | ||||
| 		Network: "unix", | ||||
| 	} | ||||
|  | ||||
| 	if strings.TrimSpace(u.Path) == "" { // path is required with unix connection | ||||
| 		return nil, errors.New("redis: empty unix socket path") | ||||
| 	} | ||||
| 	o.Addr = u.Path | ||||
|  | ||||
| 	o.Username, o.Password = getUserPassword(u) | ||||
|  | ||||
| 	dbStr := u.Query().Get("db") | ||||
| 	if dbStr == "" { | ||||
| 		return o, nil // if database is not set, connect to 0 db. | ||||
| 	} | ||||
|  | ||||
| 	db, err := strconv.Atoi(dbStr) | ||||
| 	if err != nil { | ||||
| 		return nil, fmt.Errorf("redis: invalid database number: %w", err) | ||||
| 	} | ||||
| 	o.DB = db | ||||
|  | ||||
| 	return o, nil | ||||
| } | ||||
|  | ||||
| func getUserPassword(u *url.URL) (string, string) { | ||||
| 	var user, password string | ||||
| 	if u.User != nil { | ||||
| 		user = u.User.Username() | ||||
| 		if p, ok := u.User.Password(); ok { | ||||
| 			password = p | ||||
| 		} | ||||
| 	} | ||||
| 	return user, password | ||||
| } | ||||
|  | ||||
// newConnPool builds the connection pool for opt, wrapping the dialer
// so every dial is traced as a "redis.dial" span.
func newConnPool(opt *Options) *pool.ConnPool {
	return pool.NewConnPool(&pool.Options{
		Dialer: func(ctx context.Context) (net.Conn, error) {
			var conn net.Conn
			err := internal.WithSpan(ctx, "redis.dial", func(ctx context.Context, span trace.Span) error {
				span.SetAttributes(
					label.String("db.connection_string", opt.Addr),
				)

				var err error
				conn, err = opt.Dialer(ctx, opt.Network, opt.Addr)
				if err != nil {
					// Record the failure on the span but return err unchanged.
					_ = internal.RecordError(ctx, span, err)
				}
				return err
			})
			return conn, err
		},
		PoolSize:           opt.PoolSize,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
	})
}
							
								
								
									
										137
									
								
								vendor/github.com/go-redis/redis/v8/pipeline.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										137
									
								
								vendor/github.com/go-redis/redis/v8/pipeline.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,137 @@ | ||||
| package redis | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal/pool" | ||||
| ) | ||||
|  | ||||
// pipelineExecer sends a batch of queued commands and reads their replies.
type pipelineExecer func(context.Context, []Cmder) error

// Pipeliner is a mechanism to realise Redis Pipeline technique.
//
// Pipelining is a technique to extremely speed up processing by packing
// operations to batches, send them at once to Redis and read all replies
// in a single step.
// See https://redis.io/topics/pipelining
//
// Pay attention, that Pipeline is not a transaction, so you can get unexpected
// results in case of big pipelines and small read/write timeouts.
// Redis client has retransmission logic in case of timeouts, pipeline
// can be retransmitted and commands can be executed more than once.
// To avoid this: it is a good idea to use reasonably bigger read/write timeouts
// depending on your batch size and/or use TxPipeline.
type Pipeliner interface {
	StatefulCmdable
	Do(ctx context.Context, args ...interface{}) *Cmd
	Process(ctx context.Context, cmd Cmder) error
	Close() error
	Discard() error
	Exec(ctx context.Context) ([]Cmder, error)
}

var _ Pipeliner = (*Pipeline)(nil)

// Pipeline implements pipelining as described in
// http://redis.io/topics/pipelining. It's safe for concurrent use
// by multiple goroutines.
type Pipeline struct {
	cmdable
	statefulCmdable

	ctx  context.Context
	exec pipelineExecer // sends the queued commands and reads replies

	mu     sync.Mutex // guards cmds and closed
	cmds   []Cmder    // commands queued since the last Exec/Discard
	closed bool
}
|  | ||||
// init wires the generic command builders so that every command
// constructed through cmdable/statefulCmdable is queued via Process.
func (c *Pipeline) init() {
	c.cmdable = c.Process
	c.statefulCmdable = c.Process
}
|  | ||||
// Do queues an arbitrary command built from args and returns the Cmd
// so the caller can read its result after Exec.
func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
	cmd := NewCmd(ctx, args...)
	// Process only appends to the queue and cannot fail here.
	_ = c.Process(ctx, cmd)
	return cmd
}
|  | ||||
| // Process queues the cmd for later execution. | ||||
| func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error { | ||||
| 	c.mu.Lock() | ||||
| 	c.cmds = append(c.cmds, cmd) | ||||
| 	c.mu.Unlock() | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Close closes the pipeline, releasing any open resources. | ||||
| func (c *Pipeline) Close() error { | ||||
| 	c.mu.Lock() | ||||
| 	_ = c.discard() | ||||
| 	c.closed = true | ||||
| 	c.mu.Unlock() | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Discard resets the pipeline and discards queued commands. | ||||
| func (c *Pipeline) Discard() error { | ||||
| 	c.mu.Lock() | ||||
| 	err := c.discard() | ||||
| 	c.mu.Unlock() | ||||
| 	return err | ||||
| } | ||||
|  | ||||
// discard drops all queued commands while keeping the backing slice.
// Callers must hold c.mu.
func (c *Pipeline) discard() error {
	if c.closed {
		return pool.ErrClosed
	}
	c.cmds = c.cmds[:0]
	return nil
}
|  | ||||
| // Exec executes all previously queued commands using one | ||||
| // client-server roundtrip. | ||||
| // | ||||
| // Exec always returns list of commands and error of the first failed | ||||
| // command if any. | ||||
| func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) { | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
|  | ||||
| 	if c.closed { | ||||
| 		return nil, pool.ErrClosed | ||||
| 	} | ||||
|  | ||||
| 	if len(c.cmds) == 0 { | ||||
| 		return nil, nil | ||||
| 	} | ||||
|  | ||||
| 	cmds := c.cmds | ||||
| 	c.cmds = nil | ||||
|  | ||||
| 	return cmds, c.exec(ctx, cmds) | ||||
| } | ||||
|  | ||||
// Pipelined calls fn to queue commands on the pipeline, executes them,
// and closes the pipeline, returning the executed commands and the
// first error, if any.
func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	if err := fn(c); err != nil {
		return nil, err
	}
	cmds, err := c.Exec(ctx)
	// Close never fails for a pipeline; the Exec error is what matters.
	_ = c.Close()
	return cmds, err
}
|  | ||||
// Pipeline returns the pipeline itself.
func (c *Pipeline) Pipeline() Pipeliner {
	return c
}

// TxPipelined behaves exactly like Pipelined for a plain Pipeline;
// the transactional variant is provided to satisfy the shared API.
func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipelined(ctx, fn)
}

// TxPipeline returns the pipeline itself; see TxPipelined.
func (c *Pipeline) TxPipeline() Pipeliner {
	return c
}
							
								
								
									
										629
									
								
								vendor/github.com/go-redis/redis/v8/pubsub.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										629
									
								
								vendor/github.com/go-redis/redis/v8/pubsub.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,629 @@ | ||||
| package redis | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal" | ||||
| 	"github.com/go-redis/redis/v8/internal/pool" | ||||
| 	"github.com/go-redis/redis/v8/internal/proto" | ||||
| ) | ||||
|  | ||||
const (
	// pingTimeout bounds how long the health-check goroutine waits for
	// traffic before probing the connection with PING.
	pingTimeout = time.Second
	// chanSendTimeout bounds how long a send into the Channel buffer
	// may block before the message is dropped.
	chanSendTimeout = time.Minute
)

// errPingTimeout is reported when a health-check ping gets no reply in time.
var errPingTimeout = errors.New("redis: ping timeout")
|  | ||||
// PubSub implements Pub/Sub commands as described in
// http://redis.io/topics/pubsub. Message receiving is NOT safe
// for concurrent use by multiple goroutines.
//
// PubSub automatically reconnects to Redis Server and resubscribes
// to the channels in case of network errors.
type PubSub struct {
	opt *Options

	newConn   func(ctx context.Context, channels []string) (*pool.Conn, error)
	closeConn func(*pool.Conn) error

	mu       sync.Mutex          // guards cn, channels, patterns and closed
	cn       *pool.Conn          // current connection; nil until first use
	channels map[string]struct{} // channels to (re)subscribe to
	patterns map[string]struct{} // patterns to (re)psubscribe to

	closed bool
	exit   chan struct{} // closed by Close to stop the ping goroutine

	cmd *Cmd // reusable reply buffer for Receive*

	chOnce sync.Once        // one-time creation of msgCh or allCh
	msgCh  chan *Message    // set by Channel/ChannelSize
	allCh  chan interface{} // set by ChannelWithSubscriptions
	ping   chan struct{}    // notified whenever any message is received
}
|  | ||||
| func (c *PubSub) String() string { | ||||
| 	channels := mapKeys(c.channels) | ||||
| 	channels = append(channels, mapKeys(c.patterns)...) | ||||
| 	return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", ")) | ||||
| } | ||||
|  | ||||
// init prepares the exit channel used to stop the background ping goroutine.
func (c *PubSub) init() {
	c.exit = make(chan struct{})
}
|  | ||||
| func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) { | ||||
| 	c.mu.Lock() | ||||
| 	cn, err := c.conn(ctx, nil) | ||||
| 	c.mu.Unlock() | ||||
| 	return cn, err | ||||
| } | ||||
|  | ||||
// conn returns the active connection, dialing a new one if necessary.
// newChannels are channels the caller is about to subscribe to; they
// are passed to the dialer alongside the already-tracked ones.
// Callers must hold c.mu.
func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
	if c.closed {
		return nil, pool.ErrClosed
	}
	if c.cn != nil {
		return c.cn, nil
	}

	channels := mapKeys(c.channels)
	channels = append(channels, newChannels...)

	cn, err := c.newConn(ctx, channels)
	if err != nil {
		return nil, err
	}

	// Replay existing subscriptions; a half-subscribed connection is
	// closed rather than cached.
	if err := c.resubscribe(ctx, cn); err != nil {
		_ = c.closeConn(cn)
		return nil, err
	}

	c.cn = cn
	return cn, nil
}
|  | ||||
// writeCmd writes a single command to the connection using the
// configured write timeout.
func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
	return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
		return writeCmd(wr, cmd)
	})
}
|  | ||||
| func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error { | ||||
| 	var firstErr error | ||||
|  | ||||
| 	if len(c.channels) > 0 { | ||||
| 		firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels)) | ||||
| 	} | ||||
|  | ||||
| 	if len(c.patterns) > 0 { | ||||
| 		err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns)) | ||||
| 		if err != nil && firstErr == nil { | ||||
| 			firstErr = err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return firstErr | ||||
| } | ||||
|  | ||||
// mapKeys returns the keys of m as a slice, in unspecified order.
func mapKeys(m map[string]struct{}) []string {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	return keys
}
|  | ||||
| func (c *PubSub) _subscribe( | ||||
| 	ctx context.Context, cn *pool.Conn, redisCmd string, channels []string, | ||||
| ) error { | ||||
| 	args := make([]interface{}, 0, 1+len(channels)) | ||||
| 	args = append(args, redisCmd) | ||||
| 	for _, channel := range channels { | ||||
| 		args = append(args, channel) | ||||
| 	} | ||||
| 	cmd := NewSliceCmd(ctx, args...) | ||||
| 	return c.writeCmd(ctx, cn, cmd) | ||||
| } | ||||
|  | ||||
| func (c *PubSub) releaseConnWithLock( | ||||
| 	ctx context.Context, | ||||
| 	cn *pool.Conn, | ||||
| 	err error, | ||||
| 	allowTimeout bool, | ||||
| ) { | ||||
| 	c.mu.Lock() | ||||
| 	c.releaseConn(ctx, cn, err, allowTimeout) | ||||
| 	c.mu.Unlock() | ||||
| } | ||||
|  | ||||
// releaseConn handles the outcome of an operation on cn: if cn is
// still the current connection and err marks it as broken, reconnect.
// Callers must hold c.mu.
func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
	if c.cn != cn {
		// The connection was already replaced; nothing to do.
		return
	}
	if isBadConn(err, allowTimeout) {
		c.reconnect(ctx, err)
	}
}
|  | ||||
// reconnect drops the current connection (logging reason) and dials a
// fresh one, best effort. Callers must hold c.mu.
func (c *PubSub) reconnect(ctx context.Context, reason error) {
	_ = c.closeTheCn(reason)
	_, _ = c.conn(ctx, nil) // failures here surface on the next operation
}
|  | ||||
// closeTheCn closes and clears the cached connection, logging reason
// unless the PubSub itself is shutting down. Callers must hold c.mu.
func (c *PubSub) closeTheCn(reason error) error {
	if c.cn == nil {
		return nil
	}
	if !c.closed {
		internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
	}
	err := c.closeConn(c.cn)
	c.cn = nil
	return err
}
|  | ||||
// Close terminates the PubSub: it stops the background ping goroutine
// via the exit channel and closes the current connection.
func (c *PubSub) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.closed {
		return pool.ErrClosed
	}
	c.closed = true
	close(c.exit)

	return c.closeTheCn(pool.ErrClosed)
}
|  | ||||
// Subscribe the client to the specified channels. It returns
// empty subscription if there are no channels.
func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	err := c.subscribe(ctx, "subscribe", channels...)
	// Record the desired channels even if the write failed, so that a
	// later reconnect replays them via resubscribe.
	if c.channels == nil {
		c.channels = make(map[string]struct{})
	}
	for _, s := range channels {
		c.channels[s] = struct{}{}
	}
	return err
}
|  | ||||
// PSubscribe the client to the given patterns. It returns
// empty subscription if there are no patterns.
func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
	c.mu.Lock()
	defer c.mu.Unlock()

	err := c.subscribe(ctx, "psubscribe", patterns...)
	// Record the desired patterns even if the write failed, so that a
	// later reconnect replays them via resubscribe.
	if c.patterns == nil {
		c.patterns = make(map[string]struct{})
	}
	for _, s := range patterns {
		c.patterns[s] = struct{}{}
	}
	return err
}
|  | ||||
| // Unsubscribe the client from the given channels, or from all of | ||||
| // them if none is given. | ||||
| func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error { | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
|  | ||||
| 	for _, channel := range channels { | ||||
| 		delete(c.channels, channel) | ||||
| 	} | ||||
| 	err := c.subscribe(ctx, "unsubscribe", channels...) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
| // PUnsubscribe the client from the given patterns, or from all of | ||||
| // them if none is given. | ||||
| func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error { | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
|  | ||||
| 	for _, pattern := range patterns { | ||||
| 		delete(c.patterns, pattern) | ||||
| 	} | ||||
| 	err := c.subscribe(ctx, "punsubscribe", patterns...) | ||||
| 	return err | ||||
| } | ||||
|  | ||||
// subscribe writes redisCmd for channels on the current connection,
// dialing one if needed. Callers must hold c.mu.
func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
	cn, err := c.conn(ctx, channels)
	if err != nil {
		return err
	}

	err = c._subscribe(ctx, cn, redisCmd, channels)
	c.releaseConn(ctx, cn, err, false)
	return err
}
|  | ||||
// Ping sends a PING (with an optional payload) over the pub/sub
// connection; the reply arrives asynchronously as a Pong message.
func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
	args := []interface{}{"ping"}
	if len(payload) == 1 {
		args = append(args, payload[0])
	}
	cmd := NewCmd(ctx, args...)

	cn, err := c.connWithLock(ctx)
	if err != nil {
		return err
	}

	err = c.writeCmd(ctx, cn, cmd)
	c.releaseConnWithLock(ctx, cn, err, false)
	return err
}
|  | ||||
// Subscription received after a successful subscription to channel.
type Subscription struct {
	// Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
	Kind string
	// Channel name we have subscribed to.
	Channel string
	// Number of channels we are currently subscribed to.
	Count int
}

// String renders the subscription as "kind: channel".
func (m *Subscription) String() string {
	return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
}
|  | ||||
// Message received as result of a PUBLISH command issued by another client.
type Message struct {
	Channel      string
	Pattern      string
	Payload      string
	PayloadSlice []string
}

// String renders the message as "Message<channel: payload>".
func (m *Message) String() string {
	return "Message<" + m.Channel + ": " + m.Payload + ">"
}
|  | ||||
// Pong received as result of a PING command issued by another client.
type Pong struct {
	Payload string
}

// String renders the pong, including its payload when present.
func (p *Pong) String() string {
	if p.Payload == "" {
		return "Pong"
	}
	return "Pong<" + p.Payload + ">"
}
|  | ||||
// newMessage converts a raw pub/sub reply into one of the typed
// messages: *Pong, *Subscription or *Message.
func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
	switch reply := reply.(type) {
	case string:
		// A bare string reply is a pong payload.
		return &Pong{
			Payload: reply,
		}, nil
	case []interface{}:
		// Array replies carry the message kind as their first element.
		switch kind := reply[0].(string); kind {
		case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
			// Can be nil in case of "unsubscribe".
			channel, _ := reply[1].(string)
			return &Subscription{
				Kind:    kind,
				Channel: channel,
				Count:   int(reply[2].(int64)),
			}, nil
		case "message":
			// The payload may arrive as a plain string or as a slice
			// of strings.
			switch payload := reply[2].(type) {
			case string:
				return &Message{
					Channel: reply[1].(string),
					Payload: payload,
				}, nil
			case []interface{}:
				ss := make([]string, len(payload))
				for i, s := range payload {
					ss[i] = s.(string)
				}
				return &Message{
					Channel:      reply[1].(string),
					PayloadSlice: ss,
				}, nil
			default:
				return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
			}
		case "pmessage":
			return &Message{
				Pattern: reply[1].(string),
				Channel: reply[2].(string),
				Payload: reply[3].(string),
			}, nil
		case "pong":
			return &Pong{
				Payload: reply[1].(string),
			}, nil
		default:
			return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
		}
	default:
		return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
	}
}
|  | ||||
// ReceiveTimeout acts like Receive but returns an error if message
// is not received in time. This is low-level API and in most cases
// Channel should be used instead.
func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
	if c.cmd == nil {
		// The command object is reused across calls as a reply buffer.
		c.cmd = NewCmd(ctx)
	}

	cn, err := c.connWithLock(ctx)
	if err != nil {
		return nil, err
	}

	err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
		return c.cmd.readReply(rd)
	})

	// When a timeout was requested, a timed-out read is expected and
	// must not mark the connection as bad.
	c.releaseConnWithLock(ctx, cn, err, timeout > 0)
	if err != nil {
		return nil, err
	}

	return c.newMessage(c.cmd.Val())
}
|  | ||||
// Receive returns a message as a Subscription, Message, Pong or error.
// See PubSub example for details. This is low-level API and in most cases
// Channel should be used instead.
func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
	// A zero timeout means no read deadline is applied.
	return c.ReceiveTimeout(ctx, 0)
}
|  | ||||
| // ReceiveMessage returns a Message or error ignoring Subscription and Pong | ||||
| // messages. This is low-level API and in most cases Channel should be used | ||||
| // instead. | ||||
| func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) { | ||||
| 	for { | ||||
| 		msg, err := c.Receive(ctx) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
|  | ||||
| 		switch msg := msg.(type) { | ||||
| 		case *Subscription: | ||||
| 			// Ignore. | ||||
| 		case *Pong: | ||||
| 			// Ignore. | ||||
| 		case *Message: | ||||
| 			return msg, nil | ||||
| 		default: | ||||
| 			err := fmt.Errorf("redis: unknown message: %T", msg) | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// Channel returns a Go channel for concurrently receiving messages.
// The channel is closed together with the PubSub. If the Go channel
// is blocked full for chanSendTimeout the message is dropped.
// Receive* APIs can not be used after channel is created.
//
// go-redis periodically sends ping messages to test connection health
// and re-subscribes if a ping reply cannot be received in time.
func (c *PubSub) Channel() <-chan *Message {
	return c.ChannelSize(100)
}
|  | ||||
| // ChannelSize is like Channel, but creates a Go channel | ||||
| // with specified buffer size. | ||||
| func (c *PubSub) ChannelSize(size int) <-chan *Message { | ||||
| 	c.chOnce.Do(func() { | ||||
| 		c.initPing() | ||||
| 		c.initMsgChan(size) | ||||
| 	}) | ||||
| 	if c.msgCh == nil { | ||||
| 		err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions") | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	if cap(c.msgCh) != size { | ||||
| 		err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created") | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return c.msgCh | ||||
| } | ||||
|  | ||||
| // ChannelWithSubscriptions is like Channel, but message type can be either | ||||
| // *Subscription or *Message. Subscription messages can be used to detect | ||||
| // reconnections. | ||||
| // | ||||
| // ChannelWithSubscriptions can not be used together with Channel or ChannelSize. | ||||
| func (c *PubSub) ChannelWithSubscriptions(ctx context.Context, size int) <-chan interface{} { | ||||
| 	c.chOnce.Do(func() { | ||||
| 		c.initPing() | ||||
| 		c.initAllChan(size) | ||||
| 	}) | ||||
| 	if c.allCh == nil { | ||||
| 		err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel") | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	if cap(c.allCh) != size { | ||||
| 		err := fmt.Errorf("redis: PubSub.Channel size can not be changed once created") | ||||
| 		panic(err) | ||||
| 	} | ||||
| 	return c.allCh | ||||
| } | ||||
|  | ||||
| func (c *PubSub) getContext() context.Context { | ||||
| 	if c.cmd != nil { | ||||
| 		return c.cmd.ctx | ||||
| 	} | ||||
| 	return context.Background() | ||||
| } | ||||
|  | ||||
// initPing starts the background goroutine that keeps the connection
// healthy: whenever no traffic is observed for pingTimeout it sends a
// PING, and after two silent intervals in a row it forces a reconnect.
// The goroutine stops when c.exit is closed.
func (c *PubSub) initPing() {
	ctx := context.TODO()
	c.ping = make(chan struct{}, 1)
	go func() {
		// The timer is created stopped and re-armed at the top of each
		// loop iteration.
		timer := time.NewTimer(time.Minute)
		timer.Stop()

		healthy := true
		for {
			timer.Reset(pingTimeout)
			select {
			case <-c.ping:
				// Some message arrived; the connection is alive.
				healthy = true
				if !timer.Stop() {
					<-timer.C
				}
			case <-timer.C:
				// No traffic for pingTimeout: probe with PING.
				pingErr := c.Ping(ctx)
				if healthy {
					healthy = false
				} else {
					// Second silent interval in a row: reconnect.
					if pingErr == nil {
						pingErr = errPingTimeout
					}
					c.mu.Lock()
					c.reconnect(ctx, pingErr)
					healthy = true
					c.mu.Unlock()
				}
			case <-c.exit:
				return
			}
		}
	}()
}
|  | ||||
// initMsgChan starts the goroutine that feeds msgCh from Receive.
// initMsgChan must be in sync with initAllChan.
func (c *PubSub) initMsgChan(size int) {
	ctx := context.TODO()
	c.msgCh = make(chan *Message, size)
	go func() {
		// Reused send timer; created stopped and armed per message.
		timer := time.NewTimer(time.Minute)
		timer.Stop()

		var errCount int
		for {
			msg, err := c.Receive(ctx)
			if err != nil {
				if err == pool.ErrClosed {
					// PubSub was closed: close the channel and stop.
					close(c.msgCh)
					return
				}
				// Back off briefly after repeated consecutive errors.
				if errCount > 0 {
					time.Sleep(100 * time.Millisecond)
				}
				errCount++
				continue
			}

			errCount = 0

			// Any message is as good as a ping.
			select {
			case c.ping <- struct{}{}:
			default:
			}

			switch msg := msg.(type) {
			case *Subscription:
				// Ignore.
			case *Pong:
				// Ignore.
			case *Message:
				// Give the consumer chanSendTimeout to drain before
				// dropping the message.
				timer.Reset(chanSendTimeout)
				select {
				case c.msgCh <- msg:
					if !timer.Stop() {
						<-timer.C
					}
				case <-timer.C:
					internal.Logger.Printf(
						c.getContext(),
						"redis: %s channel is full for %s (message is dropped)",
						c,
						chanSendTimeout,
					)
				}
			default:
				internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
			}
		}
	}()
}
|  | ||||
// initAllChan starts the goroutine that feeds allCh from Receive,
// forwarding both *Subscription and *Message values.
// initAllChan must be in sync with initMsgChan.
func (c *PubSub) initAllChan(size int) {
	ctx := context.TODO()
	c.allCh = make(chan interface{}, size)
	go func() {
		// Reused send timer, passed to sendMessage; created stopped.
		timer := time.NewTimer(pingTimeout)
		timer.Stop()

		var errCount int
		for {
			msg, err := c.Receive(ctx)
			if err != nil {
				if err == pool.ErrClosed {
					// PubSub was closed: close the channel and stop.
					close(c.allCh)
					return
				}
				// Back off briefly after repeated consecutive errors.
				if errCount > 0 {
					time.Sleep(100 * time.Millisecond)
				}
				errCount++
				continue
			}

			errCount = 0

			// Any message is as good as a ping.
			select {
			case c.ping <- struct{}{}:
			default:
			}

			switch msg := msg.(type) {
			case *Subscription:
				c.sendMessage(msg, timer)
			case *Pong:
				// Ignore.
			case *Message:
				c.sendMessage(msg, timer)
			default:
				internal.Logger.Printf(c.getContext(), "redis: unknown message type: %T", msg)
			}
		}
	}()
}
|  | ||||
// sendMessage forwards msg into allCh, dropping (and logging) it when
// the channel stays full for pingTimeout. The caller's timer is reused
// to avoid allocating one per message.
func (c *PubSub) sendMessage(msg interface{}, timer *time.Timer) {
	timer.Reset(pingTimeout)
	select {
	case c.allCh <- msg:
		if !timer.Stop() {
			<-timer.C
		}
	case <-timer.C:
		internal.Logger.Printf(
			c.getContext(),
			"redis: %s channel is full for %s (message is dropped)", c, pingTimeout)
	}
}
							
								
								
									
										792
									
								
								vendor/github.com/go-redis/redis/v8/redis.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										792
									
								
								vendor/github.com/go-redis/redis/v8/redis.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,792 @@ | ||||
| package redis | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal" | ||||
| 	"github.com/go-redis/redis/v8/internal/pool" | ||||
| 	"github.com/go-redis/redis/v8/internal/proto" | ||||
| 	"go.opentelemetry.io/otel/label" | ||||
| 	"go.opentelemetry.io/otel/trace" | ||||
| ) | ||||
|  | ||||
// Nil reply returned by Redis when key does not exist.
const Nil = proto.Nil

// SetLogger replaces the package-level logger used for internal
// diagnostics (reconnects, dropped messages, ...).
func SetLogger(logger internal.Logging) {
	internal.Logger = logger
}
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// Hook allows user code to observe and wrap command processing.
// BeforeProcess* may replace the context; AfterProcess* runs once the
// command (or pipeline) has finished.
type Hook interface {
	BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
	AfterProcess(ctx context.Context, cmd Cmder) error

	BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
	AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
}

// hooks holds the ordered list of registered hooks.
type hooks struct {
	hooks []Hook
}
|  | ||||
// lock caps the slice at its current length (three-index slice) so a
// subsequent AddHook on a clone appends into fresh backing storage
// instead of mutating the shared array.
func (hs *hooks) lock() {
	hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
}

// clone returns a copy whose hook list can be extended independently.
func (hs hooks) clone() hooks {
	clone := hs
	clone.lock()
	return clone
}

// AddHook appends hook to the processing chain.
func (hs *hooks) AddHook(hook Hook) {
	hs.hooks = append(hs.hooks, hook)
}
|  | ||||
// process runs cmd through the hook chain: BeforeProcess hooks in
// registration order, then fn, then AfterProcess hooks in reverse.
// A failing BeforeProcess stops the chain (fn is skipped), but every
// hook whose BeforeProcess was invoked — including the failing one —
// still gets its AfterProcess call.
func (hs hooks) process(
	ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
) error {
	// Fast path: no hooks registered.
	if len(hs.hooks) == 0 {
		err := hs.withContext(ctx, func() error {
			return fn(ctx, cmd)
		})
		cmd.SetErr(err)
		return err
	}

	var hookIndex int
	var retErr error

	for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
		ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
		if retErr != nil {
			cmd.SetErr(retErr)
		}
	}

	if retErr == nil {
		retErr = hs.withContext(ctx, func() error {
			return fn(ctx, cmd)
		})
		cmd.SetErr(retErr)
	}

	// Unwind in reverse; a later AfterProcess error overrides retErr.
	for hookIndex--; hookIndex >= 0; hookIndex-- {
		if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
			retErr = err
			cmd.SetErr(retErr)
		}
	}

	return retErr
}
|  | ||||
// processPipeline mirrors process for a batch of commands:
// BeforeProcessPipeline in order, fn, then AfterProcessPipeline in
// reverse order for every hook whose Before callback was invoked.
func (hs hooks) processPipeline(
	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
	// Fast path: no hooks registered.
	if len(hs.hooks) == 0 {
		err := hs.withContext(ctx, func() error {
			return fn(ctx, cmds)
		})
		return err
	}

	var hookIndex int
	var retErr error

	for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
		ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
		if retErr != nil {
			setCmdsErr(cmds, retErr)
		}
	}

	if retErr == nil {
		retErr = hs.withContext(ctx, func() error {
			return fn(ctx, cmds)
		})
	}

	// Unwind: hookIndex is one past the last Before callback that ran.
	for hookIndex--; hookIndex >= 0; hookIndex-- {
		if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
			retErr = err
			setCmdsErr(cmds, retErr)
		}
	}

	return retErr
}
|  | ||||
// processTxPipeline wraps cmds in MULTI/EXEC and runs the result through
// the pipeline hook chain.
func (hs hooks) processTxPipeline(
	ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
) error {
	cmds = wrapMultiExec(ctx, cmds)
	return hs.processPipeline(ctx, cmds, fn)
}

// withContext simply invokes fn; ctx is unused here and exists so build
// variants can add instrumentation around the call.
func (hs hooks) withContext(ctx context.Context, fn func() error) error {
	return fn()
}
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// baseClient carries the state shared by Client, Conn, and friends:
// the configuration and the connection pool.
type baseClient struct {
	opt      *Options
	connPool pool.Pooler

	onClose func() error // hook called when client is closed
}

// newBaseClient returns a baseClient for the given options and pool.
func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
	return &baseClient{
		opt:      opt,
		connPool: connPool,
	}
}

// clone returns a shallow copy; opt and connPool remain shared.
func (c *baseClient) clone() *baseClient {
	clone := *c
	return &clone
}

// withTimeout returns a copy whose read and write timeouts are both set
// to timeout; the receiver is left unmodified.
func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
	opt := c.opt.clone()
	opt.ReadTimeout = timeout
	opt.WriteTimeout = timeout

	clone := c.clone()
	clone.opt = opt

	return clone
}
|  | ||||
// String implements fmt.Stringer, e.g. "Redis<localhost:6379 db:0>".
func (c *baseClient) String() string {
	return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
}

// newConn dials a connection outside the pool's normal checkout path and
// runs the usual initialization (AUTH/SELECT/etc., see initConn). On init
// failure the connection is closed before the error is returned.
func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
	cn, err := c.connPool.NewConn(ctx)
	if err != nil {
		return nil, err
	}

	err = c.initConn(ctx, cn)
	if err != nil {
		_ = c.connPool.CloseConn(cn)
		return nil, err
	}

	return cn, nil
}
|  | ||||
| func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) { | ||||
| 	if c.opt.Limiter != nil { | ||||
| 		err := c.opt.Limiter.Allow() | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	cn, err := c._getConn(ctx) | ||||
| 	if err != nil { | ||||
| 		if c.opt.Limiter != nil { | ||||
| 			c.opt.Limiter.ReportResult(err) | ||||
| 		} | ||||
| 		return nil, err | ||||
| 	} | ||||
|  | ||||
| 	return cn, nil | ||||
| } | ||||
|  | ||||
// _getConn pulls a connection from the pool and, if it has not been
// initialized yet, runs initConn inside a tracing span. A connection
// that fails initialization is removed from the pool.
func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
	cn, err := c.connPool.Get(ctx)
	if err != nil {
		return nil, err
	}

	if cn.Inited {
		return cn, nil
	}

	err = internal.WithSpan(ctx, "redis.init_conn", func(ctx context.Context, span trace.Span) error {
		return c.initConn(ctx, cn)
	})
	if err != nil {
		c.connPool.Remove(ctx, cn, err)
		// Prefer the unwrapped cause when the error is wrapped; the
		// shadowing of err inside this if is intentional.
		if err := errors.Unwrap(err); err != nil {
			return nil, err
		}
		return nil, err
	}

	return cn, nil
}
|  | ||||
// initConn performs the connection handshake demanded by the options —
// AUTH, SELECT, READONLY — followed by the user's OnConnect callback.
// Connections that need none of these are marked initialized and
// returned immediately.
func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
	if cn.Inited {
		return nil
	}
	cn.Inited = true

	// Nothing to do: no auth, default DB, not read-only, no callback.
	if c.opt.Password == "" &&
		c.opt.DB == 0 &&
		!c.opt.readOnly &&
		c.opt.OnConnect == nil {
		return nil
	}

	// Wrap cn in a single-connection pool so the setup commands run on
	// exactly this connection rather than another one from the pool.
	connPool := pool.NewSingleConnPool(c.connPool, cn)
	conn := newConn(ctx, c.opt, connPool)

	_, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
		if c.opt.Password != "" {
			if c.opt.Username != "" {
				pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
			} else {
				pipe.Auth(ctx, c.opt.Password)
			}
		}

		if c.opt.DB > 0 {
			pipe.Select(ctx, c.opt.DB)
		}

		if c.opt.readOnly {
			pipe.ReadOnly(ctx)
		}

		return nil
	})
	if err != nil {
		return err
	}

	if c.opt.OnConnect != nil {
		return c.opt.OnConnect(ctx, conn)
	}
	return nil
}
|  | ||||
| func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) { | ||||
| 	if c.opt.Limiter != nil { | ||||
| 		c.opt.Limiter.ReportResult(err) | ||||
| 	} | ||||
|  | ||||
| 	if isBadConn(err, false) { | ||||
| 		c.connPool.Remove(ctx, cn, err) | ||||
| 	} else { | ||||
| 		c.connPool.Put(ctx, cn) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// withConn runs fn with a pooled connection inside a tracing span. When
// ctx is cancelable, fn runs in a goroutine so cancellation can abort the
// call by closing the connection out from under it.
func (c *baseClient) withConn(
	ctx context.Context, fn func(context.Context, *pool.Conn) error,
) error {
	return internal.WithSpan(ctx, "redis.with_conn", func(ctx context.Context, span trace.Span) error {
		cn, err := c.getConn(ctx)
		if err != nil {
			return err
		}

		if span.IsRecording() {
			if remoteAddr := cn.RemoteAddr(); remoteAddr != nil {
				span.SetAttributes(label.String("net.peer.ip", remoteAddr.String()))
			}
		}

		// err is re-assigned below; the deferred release observes its
		// final value and decides Put vs Remove from it.
		defer func() {
			c.releaseConn(ctx, cn, err)
		}()

		// Non-cancelable context: call fn directly, no goroutine needed.
		done := ctx.Done()
		if done == nil {
			err = fn(ctx, cn)
			return err
		}

		errc := make(chan error, 1)
		go func() { errc <- fn(ctx, cn) }()

		select {
		case <-done:
			// Cancelled: close the connection to unblock fn.
			_ = cn.Close()
			// Wait for the goroutine to finish and send something.
			<-errc

			err = ctx.Err()
			return err
		case err = <-errc:
			return err
		}
	})
}
|  | ||||
// process executes cmd with up to MaxRetries retries, sleeping an
// increasing backoff between attempts. A retry happens only for errors
// that shouldRetry deems transient.
func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
	var lastErr error
	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
		attempt := attempt

		var retry bool
		err := internal.WithSpan(ctx, "redis.process", func(ctx context.Context, span trace.Span) error {
			if attempt > 0 {
				if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
					return err
				}
			}

			// NOTE(review): retryTimeout starts at 1 and the store below
			// also writes 1, so timeouts are always treated as retryable
			// here — confirm this matches upstream intent before changing.
			retryTimeout := uint32(1)
			err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
				err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
					return writeCmd(wr, cmd)
				})
				if err != nil {
					return err
				}

				err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
				if err != nil {
					if cmd.readTimeout() == nil {
						atomic.StoreUint32(&retryTimeout, 1)
					}
					return err
				}

				return nil
			})
			if err == nil {
				return nil
			}
			retry = shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
			return err
		})
		if err == nil || !retry {
			return err
		}
		lastErr = err
	}
	return lastErr
}
|  | ||||
// retryBackoff returns the sleep before retry number attempt, bounded by
// the configured min/max retry backoff.
func (c *baseClient) retryBackoff(attempt int) time.Duration {
	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
|  | ||||
| func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration { | ||||
| 	if timeout := cmd.readTimeout(); timeout != nil { | ||||
| 		t := *timeout | ||||
| 		if t == 0 { | ||||
| 			return 0 | ||||
| 		} | ||||
| 		return t + 10*time.Second | ||||
| 	} | ||||
| 	return c.opt.ReadTimeout | ||||
| } | ||||
|  | ||||
| // Close closes the client, releasing any open resources. | ||||
| // | ||||
| // It is rare to Close a Client, as the Client is meant to be | ||||
| // long-lived and shared between many goroutines. | ||||
| func (c *baseClient) Close() error { | ||||
| 	var firstErr error | ||||
| 	if c.onClose != nil { | ||||
| 		if err := c.onClose(); err != nil { | ||||
| 			firstErr = err | ||||
| 		} | ||||
| 	} | ||||
| 	if err := c.connPool.Close(); err != nil && firstErr == nil { | ||||
| 		firstErr = err | ||||
| 	} | ||||
| 	return firstErr | ||||
| } | ||||
|  | ||||
// getAddr reports the server address this client connects to.
func (c *baseClient) getAddr() string {
	return c.opt.Addr
}

// processPipeline executes cmds as a plain (non-transactional) pipeline.
func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
	return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
}

// processTxPipeline executes cmds already wrapped in MULTI/EXEC.
func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
	return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
}
|  | ||||
// pipelineProcessor writes cmds on cn and reads the replies; the bool
// result reports whether a failure may be retried on a fresh connection.
type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)

// generalProcessPipeline runs p with retries and ensures every command
// carries an error if the pipeline as a whole failed.
func (c *baseClient) generalProcessPipeline(
	ctx context.Context, cmds []Cmder, p pipelineProcessor,
) error {
	err := c._generalProcessPipeline(ctx, cmds, p)
	if err != nil {
		setCmdsErr(cmds, err)
		return err
	}
	return cmdsFirstErr(cmds)
}
|  | ||||
// _generalProcessPipeline retries p up to MaxRetries times with backoff;
// retries stop as soon as p reports the failure is not retryable or
// shouldRetry rejects the error.
func (c *baseClient) _generalProcessPipeline(
	ctx context.Context, cmds []Cmder, p pipelineProcessor,
) error {
	var lastErr error
	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
		if attempt > 0 {
			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
				return err
			}
		}

		var canRetry bool
		lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
			var err error
			canRetry, err = p(ctx, cn, cmds)
			return err
		})
		if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
			return lastErr
		}
	}
	return lastErr
}
|  | ||||
// pipelineProcessCmds writes all commands in one batch and then reads
// the replies in order. Failures are always reported as retryable (true)
// since no transactional state is left on the server.
func (c *baseClient) pipelineProcessCmds(
	ctx context.Context, cn *pool.Conn, cmds []Cmder,
) (bool, error) {
	err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
		return writeCmds(wr, cmds)
	})
	if err != nil {
		return true, err
	}

	err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
		return pipelineReadCmds(rd, cmds)
	})
	return true, err
}
|  | ||||
| func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error { | ||||
| 	for _, cmd := range cmds { | ||||
| 		err := cmd.readReply(rd) | ||||
| 		cmd.SetErr(err) | ||||
| 		if err != nil && !isRedisError(err) { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
// txPipelineProcessCmds writes a MULTI/.../EXEC batch, then reads the
// queued acks and the EXEC replies. A write failure is retryable (true);
// once the read phase starts the transaction may have executed on the
// server, so those failures are not retryable (false).
func (c *baseClient) txPipelineProcessCmds(
	ctx context.Context, cn *pool.Conn, cmds []Cmder,
) (bool, error) {
	err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
		return writeCmds(wr, cmds)
	})
	if err != nil {
		return true, err
	}

	err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
		statusCmd := cmds[0].(*StatusCmd)
		// Trim multi and exec.
		cmds = cmds[1 : len(cmds)-1]

		err := txPipelineReadQueued(rd, statusCmd, cmds)
		if err != nil {
			return err
		}

		return pipelineReadCmds(rd, cmds)
	})
	return false, err
}
|  | ||||
| func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder { | ||||
| 	if len(cmds) == 0 { | ||||
| 		panic("not reached") | ||||
| 	} | ||||
| 	cmdCopy := make([]Cmder, len(cmds)+2) | ||||
| 	cmdCopy[0] = NewStatusCmd(ctx, "multi") | ||||
| 	copy(cmdCopy[1:], cmds) | ||||
| 	cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec") | ||||
| 	return cmdCopy | ||||
| } | ||||
|  | ||||
// txPipelineReadQueued consumes the MULTI ack and the per-command QUEUED
// replies, then validates the header of the EXEC reply. A Nil EXEC reply
// means the transaction was aborted (TxFailedErr).
func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
	// Parse queued replies.
	if err := statusCmd.readReply(rd); err != nil {
		return err
	}

	// One QUEUED status per command; Redis-level errors are ignored here
	// and surface later when the individual replies are read.
	for range cmds {
		if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
			return err
		}
	}

	// Parse number of replies.
	line, err := rd.ReadLine()
	if err != nil {
		if err == Nil {
			err = TxFailedErr
		}
		return err
	}

	switch line[0] {
	case proto.ErrorReply:
		return proto.ParseErrorReply(line)
	case proto.ArrayReply:
		// ok
	default:
		err := fmt.Errorf("redis: expected '*', but got line %q", line)
		return err
	}

	return nil
}
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// Client is a Redis client representing a pool of zero or more
// underlying connections. It's safe for concurrent use by multiple
// goroutines.
type Client struct {
	*baseClient
	cmdable
	hooks
	ctx context.Context // default context for operations started without one
}
|  | ||||
// NewClient returns a client to the Redis Server specified by Options.
func NewClient(opt *Options) *Client {
	opt.init()

	c := Client{
		baseClient: newBaseClient(opt, newConnPool(opt)),
		ctx:        context.Background(),
	}
	// cmdable is a func type: every generated command goes through Process.
	c.cmdable = c.Process

	return &c
}
|  | ||||
// clone copies the client, rebinding cmdable to the copy's Process and
// freezing the hook slice so original and copy do not share future
// AddHook appends (see hooks.lock).
func (c *Client) clone() *Client {
	clone := *c
	clone.cmdable = clone.Process
	clone.hooks.lock()
	return &clone
}

// WithTimeout returns a copy of the client using timeout for both reads
// and writes; the receiver is unchanged.
func (c *Client) WithTimeout(timeout time.Duration) *Client {
	clone := c.clone()
	clone.baseClient = c.baseClient.withTimeout(timeout)
	return clone
}

// Context returns the client's default context.
func (c *Client) Context() context.Context {
	return c.ctx
}

// WithContext returns a copy of the client whose default context is ctx;
// ctx must not be nil.
func (c *Client) WithContext(ctx context.Context) *Client {
	if ctx == nil {
		panic("nil context")
	}
	clone := c.clone()
	clone.ctx = ctx
	return clone
}
|  | ||||
// Conn returns a client bound to a single underlying connection, for
// stateful command sequences.
func (c *Client) Conn(ctx context.Context) *Conn {
	return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
}

// Do creates a Cmd from the args and processes the cmd.
// Any execution error is recorded on the returned Cmd.
func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
	cmd := NewCmd(ctx, args...)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Process executes cmd through the registered hooks.
func (c *Client) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}
|  | ||||
// processPipeline runs a plain pipeline through the registered hooks.
func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
	return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
}

// processTxPipeline runs a MULTI/EXEC pipeline through the registered hooks.
func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
	return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
}

// Options returns read-only Options that were used to create the client.
func (c *Client) Options() *Options {
	return c.opt
}
|  | ||||
// PoolStats exposes connection pool statistics.
type PoolStats pool.Stats

// PoolStats returns connection pool stats.
func (c *Client) PoolStats() *PoolStats {
	stats := c.connPool.Stats()
	return (*PoolStats)(stats)
}
|  | ||||
// Pipelined runs fn against a fresh pipeline and executes it.
func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipeline().Pipelined(ctx, fn)
}

// Pipeline returns a pipeline that batches commands without MULTI/EXEC.
func (c *Client) Pipeline() Pipeliner {
	pipe := Pipeline{
		ctx:  c.ctx,
		exec: c.processPipeline,
	}
	pipe.init()
	return &pipe
}

// TxPipelined runs fn against a fresh transactional pipeline and executes it.
func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.TxPipeline().Pipelined(ctx, fn)
}

// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *Client) TxPipeline() Pipeliner {
	pipe := Pipeline{
		ctx:  c.ctx,
		exec: c.processTxPipeline,
	}
	pipe.init()
	return &pipe
}
|  | ||||
// pubSub builds a PubSub that dials dedicated (non-pooled) connections
// through this client's options.
func (c *Client) pubSub() *PubSub {
	pubsub := &PubSub{
		opt: c.opt,

		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
			return c.newConn(ctx)
		},
		closeConn: c.connPool.CloseConn,
	}
	pubsub.init()
	return pubsub
}
|  | ||||
// Subscribe subscribes the client to the specified channels.
// Channels can be omitted to create empty subscription.
// Note that this method does not wait on a response from Redis, so the
// subscription may not be active immediately. To force the connection to wait,
// you may call the Receive() method on the returned *PubSub like so:
//
//    sub := client.Subscribe(queryResp)
//    iface, err := sub.Receive()
//    if err != nil {
//        // handle error
//    }
//
//    // Should be *Subscription, but others are possible if other actions have been
//    // taken on sub since it was created.
//    switch iface.(type) {
//    case *Subscription:
//        // subscribe succeeded
//    case *Message:
//        // received first message
//    case *Pong:
//        // pong received
//    default:
//        // handle error
//    }
//
//    ch := sub.Channel()
func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
	pubsub := c.pubSub()
	if len(channels) > 0 {
		// Subscribe errors are ignored here; they surface via Receive.
		_ = pubsub.Subscribe(ctx, channels...)
	}
	return pubsub
}
|  | ||||
| // PSubscribe subscribes the client to the given patterns. | ||||
| // Patterns can be omitted to create empty subscription. | ||||
| func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub { | ||||
| 	pubsub := c.pubSub() | ||||
| 	if len(channels) > 0 { | ||||
| 		_ = pubsub.PSubscribe(ctx, channels...) | ||||
| 	} | ||||
| 	return pubsub | ||||
| } | ||||
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// conn bundles the pieces a single-connection client needs; unlike
// Client it also exposes stateful commands (statefulCmdable).
type conn struct {
	baseClient
	cmdable
	statefulCmdable
	hooks // TODO: inherit hooks
}

// Conn is like Client, but its pool contains single connection.
type Conn struct {
	*conn
	ctx context.Context
}
|  | ||||
| func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn { | ||||
| 	c := Conn{ | ||||
| 		conn: &conn{ | ||||
| 			baseClient: baseClient{ | ||||
| 				opt:      opt, | ||||
| 				connPool: connPool, | ||||
| 			}, | ||||
| 		}, | ||||
| 		ctx: ctx, | ||||
| 	} | ||||
| 	c.cmdable = c.Process | ||||
| 	c.statefulCmdable = c.Process | ||||
| 	return &c | ||||
| } | ||||
|  | ||||
// Process executes cmd through the connection's hooks.
func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}

// processPipeline runs a plain pipeline through the connection's hooks.
func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
	return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
}

// processTxPipeline runs a MULTI/EXEC pipeline through the connection's hooks.
func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
	return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
}
|  | ||||
// Pipelined runs fn against a fresh pipeline and executes it.
func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipeline().Pipelined(ctx, fn)
}

// Pipeline returns a pipeline that batches commands without MULTI/EXEC.
func (c *Conn) Pipeline() Pipeliner {
	pipe := Pipeline{
		ctx:  c.ctx,
		exec: c.processPipeline,
	}
	pipe.init()
	return &pipe
}

// TxPipelined runs fn against a fresh transactional pipeline and executes it.
func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.TxPipeline().Pipelined(ctx, fn)
}

// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
func (c *Conn) TxPipeline() Pipeliner {
	pipe := Pipeline{
		ctx:  c.ctx,
		exec: c.processTxPipeline,
	}
	pipe.init()
	return &pipe
}
							
								
								
									
										180
									
								
								vendor/github.com/go-redis/redis/v8/result.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										180
									
								
								vendor/github.com/go-redis/redis/v8/result.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,180 @@ | ||||
| package redis | ||||
|  | ||||
| import "time" | ||||
|  | ||||
| // NewCmdResult returns a Cmd initialised with val and err for testing. | ||||
| func NewCmdResult(val interface{}, err error) *Cmd { | ||||
| 	var cmd Cmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewSliceResult returns a SliceCmd initialised with val and err for testing. | ||||
| func NewSliceResult(val []interface{}, err error) *SliceCmd { | ||||
| 	var cmd SliceCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewStatusResult returns a StatusCmd initialised with val and err for testing. | ||||
| func NewStatusResult(val string, err error) *StatusCmd { | ||||
| 	var cmd StatusCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewIntResult returns an IntCmd initialised with val and err for testing. | ||||
| func NewIntResult(val int64, err error) *IntCmd { | ||||
| 	var cmd IntCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewDurationResult returns a DurationCmd initialised with val and err for testing. | ||||
| func NewDurationResult(val time.Duration, err error) *DurationCmd { | ||||
| 	var cmd DurationCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewBoolResult returns a BoolCmd initialised with val and err for testing. | ||||
| func NewBoolResult(val bool, err error) *BoolCmd { | ||||
| 	var cmd BoolCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewStringResult returns a StringCmd initialised with val and err for testing. | ||||
| func NewStringResult(val string, err error) *StringCmd { | ||||
| 	var cmd StringCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewFloatResult returns a FloatCmd initialised with val and err for testing. | ||||
| func NewFloatResult(val float64, err error) *FloatCmd { | ||||
| 	var cmd FloatCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing. | ||||
| func NewStringSliceResult(val []string, err error) *StringSliceCmd { | ||||
| 	var cmd StringSliceCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing. | ||||
| func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd { | ||||
| 	var cmd BoolSliceCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing. | ||||
| func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd { | ||||
| 	var cmd StringStringMapCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing. | ||||
| func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd { | ||||
| 	var cmd StringIntMapCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewTimeCmdResult returns a TimeCmd initialised with val and err for testing. | ||||
| func NewTimeCmdResult(val time.Time, err error) *TimeCmd { | ||||
| 	var cmd TimeCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing. | ||||
| func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd { | ||||
| 	var cmd ZSliceCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing. | ||||
| func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd { | ||||
| 	var cmd ZWithKeyCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewScanCmdResult returns a ScanCmd initialised with val and err for testing. | ||||
| func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd { | ||||
| 	var cmd ScanCmd | ||||
| 	cmd.page = keys | ||||
| 	cmd.cursor = cursor | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing. | ||||
| func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd { | ||||
| 	var cmd ClusterSlotsCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing. | ||||
| func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd { | ||||
| 	var cmd GeoLocationCmd | ||||
| 	cmd.locations = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing. | ||||
| func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd { | ||||
| 	var cmd GeoPosCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing. | ||||
| func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd { | ||||
| 	var cmd CommandsInfoCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing. | ||||
| func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd { | ||||
| 	var cmd XMessageSliceCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
|  | ||||
| // NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing. | ||||
| func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd { | ||||
| 	var cmd XStreamSliceCmd | ||||
| 	cmd.val = val | ||||
| 	cmd.SetErr(err) | ||||
| 	return &cmd | ||||
| } | ||||
							
								
								
									
										731
									
								
								vendor/github.com/go-redis/redis/v8/ring.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										731
									
								
								vendor/github.com/go-redis/redis/v8/ring.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,731 @@ | ||||
| package redis | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"crypto/tls" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"net" | ||||
| 	"strconv" | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/cespare/xxhash/v2" | ||||
| 	"github.com/dgryski/go-rendezvous" | ||||
| 	"github.com/go-redis/redis/v8/internal" | ||||
| 	"github.com/go-redis/redis/v8/internal/hashtag" | ||||
| 	"github.com/go-redis/redis/v8/internal/pool" | ||||
| 	"github.com/go-redis/redis/v8/internal/rand" | ||||
| ) | ||||
|  | ||||
// errRingShardsDown is returned when every ring shard is marked down.
var errRingShardsDown = errors.New("redis: all ring shards are down")
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// ConsistentHash maps a key to the name of the shard that owns it.
type ConsistentHash interface {
	Get(string) string
}

// rendezvousWrapper adapts rendezvous.Rendezvous to ConsistentHash.
type rendezvousWrapper struct {
	*rendezvous.Rendezvous
}

// Get returns the shard name owning key.
func (w rendezvousWrapper) Get(key string) string {
	return w.Lookup(key)
}

// newRendezvous is the default NewConsistentHash: rendezvous (highest
// random weight) hashing over the shard names using xxhash.
func newRendezvous(shards []string) ConsistentHash {
	return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
}
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// RingOptions are used to configure a ring client and should be
// passed to NewRing.
type RingOptions struct {
	// Map of name => host:port addresses of ring shards.
	Addrs map[string]string

	// NewClient creates a shard client with provided name and options.
	NewClient func(name string, opt *Options) *Client

	// Frequency of PING commands sent to check shards availability.
	// Shard is considered down after 3 subsequent failed checks.
	HeartbeatFrequency time.Duration

	// NewConsistentHash returns a consistent hash that is used
	// to distribute keys across the shards.
	//
	// See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
	// for consistent hashing algorithmic tradeoffs.
	NewConsistentHash func(shards []string) ConsistentHash

	// Following options are copied from Options struct.

	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect func(ctx context.Context, cn *Conn) error

	Username string
	Password string
	DB       int

	MaxRetries      int
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration

	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration

	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration

	TLSConfig *tls.Config
	// Limiter rate-limits connection establishment per shard.
	Limiter Limiter
}
|  | ||||
| func (opt *RingOptions) init() { | ||||
| 	if opt.NewClient == nil { | ||||
| 		opt.NewClient = func(name string, opt *Options) *Client { | ||||
| 			return NewClient(opt) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if opt.HeartbeatFrequency == 0 { | ||||
| 		opt.HeartbeatFrequency = 500 * time.Millisecond | ||||
| 	} | ||||
|  | ||||
| 	if opt.NewConsistentHash == nil { | ||||
| 		opt.NewConsistentHash = newRendezvous | ||||
| 	} | ||||
|  | ||||
| 	if opt.MaxRetries == -1 { | ||||
| 		opt.MaxRetries = 0 | ||||
| 	} else if opt.MaxRetries == 0 { | ||||
| 		opt.MaxRetries = 3 | ||||
| 	} | ||||
| 	switch opt.MinRetryBackoff { | ||||
| 	case -1: | ||||
| 		opt.MinRetryBackoff = 0 | ||||
| 	case 0: | ||||
| 		opt.MinRetryBackoff = 8 * time.Millisecond | ||||
| 	} | ||||
| 	switch opt.MaxRetryBackoff { | ||||
| 	case -1: | ||||
| 		opt.MaxRetryBackoff = 0 | ||||
| 	case 0: | ||||
| 		opt.MaxRetryBackoff = 512 * time.Millisecond | ||||
| 	} | ||||
| } | ||||
|  | ||||
// clientOptions projects the ring options onto per-shard client Options.
// The Addr field is filled in later by newRingShard.
func (opt *RingOptions) clientOptions() *Options {
	return &Options{
		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,

		Username: opt.Username,
		Password: opt.Password,
		DB:       opt.DB,

		// Shard clients never retry themselves; the Ring drives retries
		// (see (*Ring).process), so retrying here would multiply attempts.
		MaxRetries: -1,

		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,

		PoolSize:           opt.PoolSize,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,

		TLSConfig: opt.TLSConfig,
		Limiter:   opt.Limiter,
	}
}
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
| type ringShard struct { | ||||
| 	Client *Client | ||||
| 	down   int32 | ||||
| } | ||||
|  | ||||
| func newRingShard(opt *RingOptions, name, addr string) *ringShard { | ||||
| 	clopt := opt.clientOptions() | ||||
| 	clopt.Addr = addr | ||||
|  | ||||
| 	return &ringShard{ | ||||
| 		Client: opt.NewClient(name, clopt), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (shard *ringShard) String() string { | ||||
| 	var state string | ||||
| 	if shard.IsUp() { | ||||
| 		state = "up" | ||||
| 	} else { | ||||
| 		state = "down" | ||||
| 	} | ||||
| 	return fmt.Sprintf("%s is %s", shard.Client, state) | ||||
| } | ||||
|  | ||||
// IsDown reports whether the shard has accumulated enough consecutive
// failed heartbeat votes (3) to be considered offline.
func (shard *ringShard) IsDown() bool {
	const threshold = 3
	return atomic.LoadInt32(&shard.down) >= threshold
}

// IsUp reports whether the shard is considered online.
func (shard *ringShard) IsUp() bool {
	return !shard.IsDown()
}

// Vote votes to set shard state and returns true if state was changed.
// An "up" vote resets the failure counter (state changes only if the
// shard was down); a "down" vote increments it (state changes only on
// the vote that crosses the threshold).
func (shard *ringShard) Vote(up bool) bool {
	if up {
		changed := shard.IsDown()
		atomic.StoreInt32(&shard.down, 0)
		return changed
	}

	// Already down: counting further failures would not change state.
	if shard.IsDown() {
		return false
	}

	atomic.AddInt32(&shard.down, 1)
	return shard.IsDown()
}
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
| type ringShards struct { | ||||
| 	opt *RingOptions | ||||
|  | ||||
| 	mu       sync.RWMutex | ||||
| 	hash     ConsistentHash | ||||
| 	shards   map[string]*ringShard // read only | ||||
| 	list     []*ringShard          // read only | ||||
| 	numShard int | ||||
| 	closed   bool | ||||
| } | ||||
|  | ||||
| func newRingShards(opt *RingOptions) *ringShards { | ||||
| 	shards := make(map[string]*ringShard, len(opt.Addrs)) | ||||
| 	list := make([]*ringShard, 0, len(shards)) | ||||
|  | ||||
| 	for name, addr := range opt.Addrs { | ||||
| 		shard := newRingShard(opt, name, addr) | ||||
| 		shards[name] = shard | ||||
|  | ||||
| 		list = append(list, shard) | ||||
| 	} | ||||
|  | ||||
| 	c := &ringShards{ | ||||
| 		opt: opt, | ||||
|  | ||||
| 		shards: shards, | ||||
| 		list:   list, | ||||
| 	} | ||||
| 	c.rebalance() | ||||
|  | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| func (c *ringShards) List() []*ringShard { | ||||
| 	var list []*ringShard | ||||
|  | ||||
| 	c.mu.RLock() | ||||
| 	if !c.closed { | ||||
| 		list = c.list | ||||
| 	} | ||||
| 	c.mu.RUnlock() | ||||
|  | ||||
| 	return list | ||||
| } | ||||
|  | ||||
| func (c *ringShards) Hash(key string) string { | ||||
| 	key = hashtag.Key(key) | ||||
|  | ||||
| 	var hash string | ||||
|  | ||||
| 	c.mu.RLock() | ||||
| 	if c.numShard > 0 { | ||||
| 		hash = c.hash.Get(key) | ||||
| 	} | ||||
| 	c.mu.RUnlock() | ||||
|  | ||||
| 	return hash | ||||
| } | ||||
|  | ||||
| func (c *ringShards) GetByKey(key string) (*ringShard, error) { | ||||
| 	key = hashtag.Key(key) | ||||
|  | ||||
| 	c.mu.RLock() | ||||
|  | ||||
| 	if c.closed { | ||||
| 		c.mu.RUnlock() | ||||
| 		return nil, pool.ErrClosed | ||||
| 	} | ||||
|  | ||||
| 	if c.numShard == 0 { | ||||
| 		c.mu.RUnlock() | ||||
| 		return nil, errRingShardsDown | ||||
| 	} | ||||
|  | ||||
| 	hash := c.hash.Get(key) | ||||
| 	if hash == "" { | ||||
| 		c.mu.RUnlock() | ||||
| 		return nil, errRingShardsDown | ||||
| 	} | ||||
|  | ||||
| 	shard := c.shards[hash] | ||||
| 	c.mu.RUnlock() | ||||
|  | ||||
| 	return shard, nil | ||||
| } | ||||
|  | ||||
| func (c *ringShards) GetByName(shardName string) (*ringShard, error) { | ||||
| 	if shardName == "" { | ||||
| 		return c.Random() | ||||
| 	} | ||||
|  | ||||
| 	c.mu.RLock() | ||||
| 	shard := c.shards[shardName] | ||||
| 	c.mu.RUnlock() | ||||
| 	return shard, nil | ||||
| } | ||||
|  | ||||
| func (c *ringShards) Random() (*ringShard, error) { | ||||
| 	return c.GetByKey(strconv.Itoa(rand.Int())) | ||||
| } | ||||
|  | ||||
| // heartbeat monitors state of each shard in the ring. | ||||
| func (c *ringShards) Heartbeat(frequency time.Duration) { | ||||
| 	ticker := time.NewTicker(frequency) | ||||
| 	defer ticker.Stop() | ||||
|  | ||||
| 	ctx := context.Background() | ||||
| 	for range ticker.C { | ||||
| 		var rebalance bool | ||||
|  | ||||
| 		for _, shard := range c.List() { | ||||
| 			err := shard.Client.Ping(ctx).Err() | ||||
| 			isUp := err == nil || err == pool.ErrPoolTimeout | ||||
| 			if shard.Vote(isUp) { | ||||
| 				internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard) | ||||
| 				rebalance = true | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		if rebalance { | ||||
| 			c.rebalance() | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // rebalance removes dead shards from the Ring. | ||||
| func (c *ringShards) rebalance() { | ||||
| 	c.mu.RLock() | ||||
| 	shards := c.shards | ||||
| 	c.mu.RUnlock() | ||||
|  | ||||
| 	liveShards := make([]string, 0, len(shards)) | ||||
|  | ||||
| 	for name, shard := range shards { | ||||
| 		if shard.IsUp() { | ||||
| 			liveShards = append(liveShards, name) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	hash := c.opt.NewConsistentHash(liveShards) | ||||
|  | ||||
| 	c.mu.Lock() | ||||
| 	c.hash = hash | ||||
| 	c.numShard = len(liveShards) | ||||
| 	c.mu.Unlock() | ||||
| } | ||||
|  | ||||
| func (c *ringShards) Len() int { | ||||
| 	c.mu.RLock() | ||||
| 	l := c.numShard | ||||
| 	c.mu.RUnlock() | ||||
| 	return l | ||||
| } | ||||
|  | ||||
// Close closes every shard client and marks the ring closed, returning
// the first error encountered. Subsequent calls are no-ops.
func (c *ringShards) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.closed {
		return nil
	}
	c.closed = true

	var firstErr error
	for _, shard := range c.shards {
		if err := shard.Client.Close(); err != nil && firstErr == nil {
			firstErr = err
		}
	}
	// Drop references so the shard clients can be garbage collected.
	c.hash = nil
	c.shards = nil
	c.list = nil

	return firstErr
}
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// ring holds the shared, shallow-copyable state embedded in Ring so that
// WithContext clones stay cheap.
type ring struct {
	opt           *RingOptions
	shards        *ringShards
	cmdsInfoCache *cmdsInfoCache //nolint:structcheck
}

// Ring is a Redis client that uses consistent hashing to distribute
// keys across multiple Redis servers (shards). It's safe for
// concurrent use by multiple goroutines.
//
// Ring monitors the state of each shard and removes dead shards from
// the ring. When a shard comes online it is added back to the ring. This
// gives you maximum availability and partition tolerance, but no
// consistency between different shards or even clients. Each client
// uses shards that are available to the client and does not do any
// coordination when shard state is changed.
//
// Ring should be used when you need multiple Redis servers for caching
// and can tolerate losing data when one of the servers dies.
// Otherwise you should use Redis Cluster.
type Ring struct {
	*ring
	cmdable
	hooks
	ctx context.Context
}
|  | ||||
// NewRing returns a Ring configured by opt and starts the background
// heartbeat goroutine that monitors shard health.
func NewRing(opt *RingOptions) *Ring {
	opt.init()

	ring := Ring{
		ring: &ring{
			opt:    opt,
			shards: newRingShards(opt),
		},
		ctx: context.Background(),
	}

	ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
	ring.cmdable = ring.Process

	// Heartbeat runs for the life of the process; the Ring is meant to
	// be long-lived.
	go ring.shards.Heartbeat(opt.HeartbeatFrequency)

	return &ring
}

// Context returns the context the Ring was created with (or set via
// WithContext).
func (c *Ring) Context() context.Context {
	return c.ctx
}

// WithContext returns a shallow clone of the Ring that uses ctx for
// internal operations; the original Ring is unchanged.
func (c *Ring) WithContext(ctx context.Context) *Ring {
	if ctx == nil {
		panic("nil context")
	}
	clone := *c
	// Rebind cmdable to the clone so commands go through clone.Process.
	clone.cmdable = clone.Process
	clone.hooks.lock()
	clone.ctx = ctx
	return &clone
}
|  | ||||
// Do creates a Cmd from the args and processes the cmd.
// The command's error, if any, is available via cmd.Err().
func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
	cmd := NewCmd(ctx, args...)
	_ = c.Process(ctx, cmd)
	return cmd
}

// Process runs cmd through the hook chain and then the ring's core
// processing (shard selection plus retries).
func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.process)
}

// Options returns read-only Options that were used to create the client.
func (c *Ring) Options() *RingOptions {
	return c.opt
}

// retryBackoff returns the sleep duration before retry number attempt.
func (c *Ring) retryBackoff(attempt int) time.Duration {
	return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
}
|  | ||||
| // PoolStats returns accumulated connection pool stats. | ||||
| func (c *Ring) PoolStats() *PoolStats { | ||||
| 	shards := c.shards.List() | ||||
| 	var acc PoolStats | ||||
| 	for _, shard := range shards { | ||||
| 		s := shard.Client.connPool.Stats() | ||||
| 		acc.Hits += s.Hits | ||||
| 		acc.Misses += s.Misses | ||||
| 		acc.Timeouts += s.Timeouts | ||||
| 		acc.TotalConns += s.TotalConns | ||||
| 		acc.IdleConns += s.IdleConns | ||||
| 	} | ||||
| 	return &acc | ||||
| } | ||||
|  | ||||
| // Len returns the current number of shards in the ring. | ||||
| func (c *Ring) Len() int { | ||||
| 	return c.shards.Len() | ||||
| } | ||||
|  | ||||
// Subscribe subscribes the client to the specified channels.
// The shard is chosen by hashing the first channel name; all channels
// are subscribed on that one shard.
func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
	if len(channels) == 0 {
		panic("at least one channel is required")
	}

	shard, err := c.shards.GetByKey(channels[0])
	if err != nil {
		// TODO: return PubSub with sticky error
		panic(err)
	}
	return shard.Client.Subscribe(ctx, channels...)
}

// PSubscribe subscribes the client to the given patterns.
// Like Subscribe, the shard is chosen by hashing the first pattern.
func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
	if len(channels) == 0 {
		panic("at least one channel is required")
	}

	shard, err := c.shards.GetByKey(channels[0])
	if err != nil {
		// TODO: return PubSub with sticky error
		panic(err)
	}
	return shard.Client.PSubscribe(ctx, channels...)
}
|  | ||||
| // ForEachShard concurrently calls the fn on each live shard in the ring. | ||||
| // It returns the first error if any. | ||||
| func (c *Ring) ForEachShard( | ||||
| 	ctx context.Context, | ||||
| 	fn func(ctx context.Context, client *Client) error, | ||||
| ) error { | ||||
| 	shards := c.shards.List() | ||||
| 	var wg sync.WaitGroup | ||||
| 	errCh := make(chan error, 1) | ||||
| 	for _, shard := range shards { | ||||
| 		if shard.IsDown() { | ||||
| 			continue | ||||
| 		} | ||||
|  | ||||
| 		wg.Add(1) | ||||
| 		go func(shard *ringShard) { | ||||
| 			defer wg.Done() | ||||
| 			err := fn(ctx, shard.Client) | ||||
| 			if err != nil { | ||||
| 				select { | ||||
| 				case errCh <- err: | ||||
| 				default: | ||||
| 				} | ||||
| 			} | ||||
| 		}(shard) | ||||
| 	} | ||||
| 	wg.Wait() | ||||
|  | ||||
| 	select { | ||||
| 	case err := <-errCh: | ||||
| 		return err | ||||
| 	default: | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) { | ||||
| 	shards := c.shards.List() | ||||
| 	var firstErr error | ||||
| 	for _, shard := range shards { | ||||
| 		cmdsInfo, err := shard.Client.Command(ctx).Result() | ||||
| 		if err == nil { | ||||
| 			return cmdsInfo, nil | ||||
| 		} | ||||
| 		if firstErr == nil { | ||||
| 			firstErr = err | ||||
| 		} | ||||
| 	} | ||||
| 	if firstErr == nil { | ||||
| 		return nil, errRingShardsDown | ||||
| 	} | ||||
| 	return nil, firstErr | ||||
| } | ||||
|  | ||||
| func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo { | ||||
| 	cmdsInfo, err := c.cmdsInfoCache.Get(ctx) | ||||
| 	if err != nil { | ||||
| 		return nil | ||||
| 	} | ||||
| 	info := cmdsInfo[name] | ||||
| 	if info == nil { | ||||
| 		internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name) | ||||
| 	} | ||||
| 	return info | ||||
| } | ||||
|  | ||||
// cmdShard picks the shard for cmd: by its first key when the command
// has one, otherwise a random shard.
func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
	cmdInfo := c.cmdInfo(ctx, cmd.Name())
	pos := cmdFirstKeyPos(cmd, cmdInfo)
	// pos == 0 means the command has no key (e.g. PING).
	if pos == 0 {
		return c.shards.Random()
	}
	firstKey := cmd.stringArg(pos)
	return c.shards.GetByKey(firstKey)
}

// process runs cmd with up to MaxRetries retries, re-resolving the shard
// on every attempt so a failed-over shard is picked up, and backing off
// between attempts (aborting if ctx is canceled during the sleep).
func (c *Ring) process(ctx context.Context, cmd Cmder) error {
	var lastErr error
	for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
		if attempt > 0 {
			if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
				return err
			}
		}

		shard, err := c.cmdShard(ctx, cmd)
		if err != nil {
			return err
		}

		lastErr = shard.Client.Process(ctx, cmd)
		if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
			return lastErr
		}
	}
	return lastErr
}
|  | ||||
// Pipelined runs fn against a fresh pipeline and executes it.
func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipeline().Pipelined(ctx, fn)
}

// Pipeline returns a pipeline whose queued commands are split per shard
// on Exec (see generalProcessPipeline).
func (c *Ring) Pipeline() Pipeliner {
	pipe := Pipeline{
		ctx:  c.ctx,
		exec: c.processPipeline,
	}
	pipe.init()
	return &pipe
}

// processPipeline executes cmds as a non-transactional pipeline.
func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
	return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
		return c.generalProcessPipeline(ctx, cmds, false)
	})
}

// TxPipelined runs fn against a fresh transactional pipeline and
// executes it.
func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.TxPipeline().Pipelined(ctx, fn)
}

// TxPipeline returns a MULTI/EXEC pipeline. Note the transaction is
// per-shard; commands hashing to different shards are not atomic with
// respect to each other.
func (c *Ring) TxPipeline() Pipeliner {
	pipe := Pipeline{
		ctx:  c.ctx,
		exec: c.processTxPipeline,
	}
	pipe.init()
	return &pipe
}

// processTxPipeline executes cmds as a transactional (MULTI/EXEC)
// pipeline.
func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
	return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
		return c.generalProcessPipeline(ctx, cmds, true)
	})
}
|  | ||||
// generalProcessPipeline groups cmds by the shard owning each command's
// first key and runs the per-shard pipelines concurrently. tx selects
// MULTI/EXEC vs plain pipelining. Errors are left on the individual
// commands; the first one is returned.
func (c *Ring) generalProcessPipeline(
	ctx context.Context, cmds []Cmder, tx bool,
) error {
	cmdsMap := make(map[string][]Cmder)
	for _, cmd := range cmds {
		cmdInfo := c.cmdInfo(ctx, cmd.Name())
		hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
		if hash != "" {
			hash = c.shards.Hash(hash)
		}
		// hash may be "" (keyless command or no live shards); those
		// commands go to a random shard via GetByName("").
		cmdsMap[hash] = append(cmdsMap[hash], cmd)
	}

	var wg sync.WaitGroup
	for hash, cmds := range cmdsMap {
		wg.Add(1)
		go func(hash string, cmds []Cmder) {
			defer wg.Done()

			// Errors are recorded on the cmds themselves.
			_ = c.processShardPipeline(ctx, hash, cmds, tx)
		}(hash, cmds)
	}

	wg.Wait()
	return cmdsFirstErr(cmds)
}

// processShardPipeline runs cmds on the shard named hash (random shard
// when hash is empty), marking every command failed if lookup fails.
func (c *Ring) processShardPipeline(
	ctx context.Context, hash string, cmds []Cmder, tx bool,
) error {
	// TODO: retry?
	shard, err := c.shards.GetByName(hash)
	if err != nil {
		setCmdsErr(cmds, err)
		return err
	}

	if tx {
		return shard.Client.processTxPipeline(ctx, cmds)
	}
	return shard.Client.processPipeline(ctx, cmds)
}
|  | ||||
| func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error { | ||||
| 	if len(keys) == 0 { | ||||
| 		return fmt.Errorf("redis: Watch requires at least one key") | ||||
| 	} | ||||
|  | ||||
| 	var shards []*ringShard | ||||
| 	for _, key := range keys { | ||||
| 		if key != "" { | ||||
| 			shard, err := c.shards.GetByKey(hashtag.Key(key)) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
|  | ||||
| 			shards = append(shards, shard) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if len(shards) == 0 { | ||||
| 		return fmt.Errorf("redis: Watch requires at least one shard") | ||||
| 	} | ||||
|  | ||||
| 	if len(shards) > 1 { | ||||
| 		for _, shard := range shards[1:] { | ||||
| 			if shard.Client != shards[0].Client { | ||||
| 				err := fmt.Errorf("redis: Watch requires all keys to be in the same shard") | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return shards[0].Client.Watch(ctx, fn, keys...) | ||||
| } | ||||
|  | ||||
// Close closes the ring client, releasing any open resources.
//
// It is rare to Close a Ring, as the Ring is meant to be long-lived
// and shared between many goroutines.
func (c *Ring) Close() error {
	return c.shards.Close()
}
							
								
								
									
										65
									
								
								vendor/github.com/go-redis/redis/v8/script.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										65
									
								
								vendor/github.com/go-redis/redis/v8/script.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,65 @@ | ||||
| package redis | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"crypto/sha1" | ||||
| 	"encoding/hex" | ||||
| 	"io" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
// Scripter is the subset of the client API needed to load and run Lua
// scripts; it is satisfied by all client flavors.
type Scripter interface {
	Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
	EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
	ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
	ScriptLoad(ctx context.Context, script string) *StringCmd
}

// Compile-time checks that every client flavor implements Scripter.
var (
	_ Scripter = (*Client)(nil)
	_ Scripter = (*Ring)(nil)
	_ Scripter = (*ClusterClient)(nil)
)
|  | ||||
// Script pairs a Lua script with its precomputed SHA1 digest so it can
// be run via EVALSHA and reloaded via EVAL when missing from the server.
type Script struct {
	src, hash string
}

// NewScript returns a Script for src with its SHA1 hex digest computed
// up front.
func NewScript(src string) *Script {
	digest := sha1.New()
	_, _ = io.WriteString(digest, src) // hash.Hash writes never fail
	return &Script{
		src:  src,
		hash: hex.EncodeToString(digest.Sum(nil)),
	}
}

// Hash returns the script's SHA1 hex digest, as used by EVALSHA.
func (s *Script) Hash() string {
	return s.hash
}
|  | ||||
| func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd { | ||||
| 	return c.ScriptLoad(ctx, s.src) | ||||
| } | ||||
|  | ||||
| func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd { | ||||
| 	return c.ScriptExists(ctx, s.hash) | ||||
| } | ||||
|  | ||||
| func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd { | ||||
| 	return c.Eval(ctx, s.src, keys, args...) | ||||
| } | ||||
|  | ||||
| func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd { | ||||
| 	return c.EvalSha(ctx, s.hash, keys, args...) | ||||
| } | ||||
|  | ||||
| // Run optimistically uses EVALSHA to run the script. If script does not exist | ||||
| // it is retried using EVAL. | ||||
| func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd { | ||||
| 	r := s.EvalSha(ctx, c, keys, args...) | ||||
| 	if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") { | ||||
| 		return s.Eval(ctx, c, keys, args...) | ||||
| 	} | ||||
| 	return r | ||||
| } | ||||
							
								
								
									
										738
									
								
								vendor/github.com/go-redis/redis/v8/sentinel.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										738
									
								
								vendor/github.com/go-redis/redis/v8/sentinel.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,738 @@ | ||||
| package redis | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"crypto/tls" | ||||
| 	"errors" | ||||
| 	"net" | ||||
| 	"strings" | ||||
| 	"sync" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal" | ||||
| 	"github.com/go-redis/redis/v8/internal/pool" | ||||
| 	"github.com/go-redis/redis/v8/internal/rand" | ||||
| ) | ||||
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// FailoverOptions are used to configure a failover client and should
// be passed to NewFailoverClient.
type FailoverOptions struct {
	// The master name.
	MasterName string
	// A seed list of host:port addresses of sentinel nodes.
	SentinelAddrs []string
	// Sentinel password from "requirepass <password>" (if enabled) in Sentinel configuration
	SentinelPassword string

	// Allows routing read-only commands to the closest master or slave node.
	// This option only works with NewFailoverClusterClient.
	RouteByLatency bool
	// Allows routing read-only commands to the random master or slave node.
	// This option only works with NewFailoverClusterClient.
	RouteRandomly bool

	// Route all commands to slave read-only nodes.
	SlaveOnly bool

	// Following options are copied from Options struct.

	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect func(ctx context.Context, cn *Conn) error

	Username string
	Password string
	DB       int

	MaxRetries      int
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration

	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration

	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration

	TLSConfig *tls.Config
}
|  | ||||
// clientOptions projects the failover options onto Options for the
// master/slave client; the address is resolved per-connection by the
// sentinel-aware dialer, so Addr is only a placeholder label.
func (opt *FailoverOptions) clientOptions() *Options {
	return &Options{
		Addr: "FailoverClient",

		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,

		DB:       opt.DB,
		Username: opt.Username,
		Password: opt.Password,

		MaxRetries:      opt.MaxRetries,
		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,

		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,

		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,

		TLSConfig: opt.TLSConfig,
	}
}

// sentinelOptions projects the failover options onto Options for a
// connection to one Sentinel node. Sentinels always use DB 0 and the
// dedicated SentinelPassword (not the data-node credentials).
func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
	return &Options{
		Addr: addr,

		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,

		DB:       0,
		Password: opt.SentinelPassword,

		MaxRetries:      opt.MaxRetries,
		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,

		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,

		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,

		TLSConfig: opt.TLSConfig,
	}
}
|  | ||||
// clusterOptions projects the failover options onto ClusterOptions for
// NewFailoverClusterClient. MaxRetries maps to MaxRedirects because the
// cluster client retries by following redirects.
func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
	return &ClusterOptions{
		Dialer:    opt.Dialer,
		OnConnect: opt.OnConnect,

		Username: opt.Username,
		Password: opt.Password,

		MaxRedirects: opt.MaxRetries,

		RouteByLatency: opt.RouteByLatency,
		RouteRandomly:  opt.RouteRandomly,

		MinRetryBackoff: opt.MinRetryBackoff,
		MaxRetryBackoff: opt.MaxRetryBackoff,

		DialTimeout:  opt.DialTimeout,
		ReadTimeout:  opt.ReadTimeout,
		WriteTimeout: opt.WriteTimeout,

		PoolSize:           opt.PoolSize,
		PoolTimeout:        opt.PoolTimeout,
		IdleTimeout:        opt.IdleTimeout,
		IdleCheckFrequency: opt.IdleCheckFrequency,
		MinIdleConns:       opt.MinIdleConns,
		MaxConnAge:         opt.MaxConnAge,

		TLSConfig: opt.TLSConfig,
	}
}
|  | ||||
// NewFailoverClient returns a Redis client that uses Redis Sentinel
// for automatic failover. It's safe for concurrent use by multiple
// goroutines.
func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
	if failoverOpt.RouteByLatency {
		panic("to route commands by latency, use NewFailoverClusterClient")
	}
	if failoverOpt.RouteRandomly {
		panic("to route commands randomly, use NewFailoverClusterClient")
	}

	// Copy the seed list so later mutation of the caller's slice cannot
	// affect the failover state.
	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
	copy(sentinelAddrs, failoverOpt.SentinelAddrs)

	failover := &sentinelFailover{
		opt:           failoverOpt,
		sentinelAddrs: sentinelAddrs,
	}

	opt := failoverOpt.clientOptions()
	// Every new connection asks Sentinel for the current master address.
	opt.Dialer = masterSlaveDialer(failover)
	opt.init()

	connPool := newConnPool(opt)

	// On failover, drop every pooled connection that does not point at
	// the new master so subsequent commands reconnect there.
	failover.mu.Lock()
	failover.onFailover = func(ctx context.Context, addr string) {
		_ = connPool.Filter(func(cn *pool.Conn) bool {
			return cn.RemoteAddr().String() != addr
		})
	}
	failover.mu.Unlock()

	c := Client{
		baseClient: newBaseClient(opt, connPool),
		ctx:        context.Background(),
	}
	c.cmdable = c.Process
	// Closing the client also tears down the sentinel watcher.
	c.onClose = failover.Close

	return &c
}
|  | ||||
| func masterSlaveDialer( | ||||
| 	failover *sentinelFailover, | ||||
| ) func(ctx context.Context, network, addr string) (net.Conn, error) { | ||||
| 	return func(ctx context.Context, network, _ string) (net.Conn, error) { | ||||
| 		var addr string | ||||
| 		var err error | ||||
|  | ||||
| 		if failover.opt.SlaveOnly { | ||||
| 			addr, err = failover.RandomSlaveAddr(ctx) | ||||
| 		} else { | ||||
| 			addr, err = failover.MasterAddr(ctx) | ||||
| 			if err == nil { | ||||
| 				failover.trySwitchMaster(ctx, addr) | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		if failover.opt.Dialer != nil { | ||||
| 			return failover.opt.Dialer(ctx, network, addr) | ||||
| 		} | ||||
| 		return net.DialTimeout("tcp", addr, failover.opt.DialTimeout) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// SentinelClient is a client for a Redis Sentinel.
type SentinelClient struct {
	*baseClient
	hooks
	// ctx is returned by Context and used when no per-call context is given.
	ctx context.Context
}
|  | ||||
| func NewSentinelClient(opt *Options) *SentinelClient { | ||||
| 	opt.init() | ||||
| 	c := &SentinelClient{ | ||||
| 		baseClient: &baseClient{ | ||||
| 			opt:      opt, | ||||
| 			connPool: newConnPool(opt), | ||||
| 		}, | ||||
| 		ctx: context.Background(), | ||||
| 	} | ||||
| 	return c | ||||
| } | ||||
|  | ||||
// Context returns the context the client was created or cloned with.
func (c *SentinelClient) Context() context.Context {
	return c.ctx
}
|  | ||||
| func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient { | ||||
| 	if ctx == nil { | ||||
| 		panic("nil context") | ||||
| 	} | ||||
| 	clone := *c | ||||
| 	clone.ctx = ctx | ||||
| 	return &clone | ||||
| } | ||||
|  | ||||
// Process runs cmd through the hook chain and the underlying connection.
func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}
|  | ||||
| func (c *SentinelClient) pubSub() *PubSub { | ||||
| 	pubsub := &PubSub{ | ||||
| 		opt: c.opt, | ||||
|  | ||||
| 		newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) { | ||||
| 			return c.newConn(ctx) | ||||
| 		}, | ||||
| 		closeConn: c.connPool.CloseConn, | ||||
| 	} | ||||
| 	pubsub.init() | ||||
| 	return pubsub | ||||
| } | ||||
|  | ||||
| // Ping is used to test if a connection is still alive, or to | ||||
| // measure latency. | ||||
| func (c *SentinelClient) Ping(ctx context.Context) *StringCmd { | ||||
| 	cmd := NewStringCmd(ctx, "ping") | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // Subscribe subscribes the client to the specified channels. | ||||
| // Channels can be omitted to create empty subscription. | ||||
| func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub { | ||||
| 	pubsub := c.pubSub() | ||||
| 	if len(channels) > 0 { | ||||
| 		_ = pubsub.Subscribe(ctx, channels...) | ||||
| 	} | ||||
| 	return pubsub | ||||
| } | ||||
|  | ||||
| // PSubscribe subscribes the client to the given patterns. | ||||
| // Patterns can be omitted to create empty subscription. | ||||
| func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub { | ||||
| 	pubsub := c.pubSub() | ||||
| 	if len(channels) > 0 { | ||||
| 		_ = pubsub.PSubscribe(ctx, channels...) | ||||
| 	} | ||||
| 	return pubsub | ||||
| } | ||||
|  | ||||
| func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd { | ||||
| 	cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name) | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd { | ||||
| 	cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name) | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // Failover forces a failover as if the master was not reachable, and without | ||||
| // asking for agreement to other Sentinels. | ||||
| func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd { | ||||
| 	cmd := NewStatusCmd(ctx, "sentinel", "failover", name) | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // Reset resets all the masters with matching name. The pattern argument is a | ||||
| // glob-style pattern. The reset process clears any previous state in a master | ||||
| // (including a failover in progress), and removes every slave and sentinel | ||||
| // already discovered and associated with the master. | ||||
| func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd { | ||||
| 	cmd := NewIntCmd(ctx, "sentinel", "reset", pattern) | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // FlushConfig forces Sentinel to rewrite its configuration on disk, including | ||||
| // the current Sentinel state. | ||||
| func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd { | ||||
| 	cmd := NewStatusCmd(ctx, "sentinel", "flushconfig") | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // Master shows the state and info of the specified master. | ||||
| func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd { | ||||
| 	cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name) | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // Masters shows a list of monitored masters and their state. | ||||
| func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd { | ||||
| 	cmd := NewSliceCmd(ctx, "sentinel", "masters") | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // Slaves shows a list of slaves for the specified master and their state. | ||||
| func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd { | ||||
| 	cmd := NewSliceCmd(ctx, "sentinel", "slaves", name) | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // CkQuorum checks if the current Sentinel configuration is able to reach the | ||||
| // quorum needed to failover a master, and the majority needed to authorize the | ||||
| // failover. This command should be used in monitoring systems to check if a | ||||
| // Sentinel deployment is ok. | ||||
| func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd { | ||||
| 	cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name) | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // Monitor tells the Sentinel to start monitoring a new master with the specified | ||||
| // name, ip, port, and quorum. | ||||
| func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd { | ||||
| 	cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum) | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // Set is used in order to change configuration parameters of a specific master. | ||||
| func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd { | ||||
| 	cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value) | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // Remove is used in order to remove the specified master: the master will no | ||||
| // longer be monitored, and will totally be removed from the internal state of | ||||
| // the Sentinel. | ||||
| func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd { | ||||
| 	cmd := NewStringCmd(ctx, "sentinel", "remove", name) | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// sentinelFailover tracks the current master through Redis Sentinel and
// notifies the registered callbacks when the topology changes.
type sentinelFailover struct {
	opt *FailoverOptions

	// sentinelAddrs is the candidate list; a working sentinel is swapped
	// to index 0 and newly discovered peers are appended.
	sentinelAddrs []string

	// onFailover is invoked when the master address changes.
	onFailover func(ctx context.Context, addr string)
	// onUpdate is invoked on every sentinel pubsub notification.
	onUpdate func(ctx context.Context)

	// mu guards all fields below.
	mu          sync.RWMutex
	_masterAddr string // last master address observed by trySwitchMaster
	sentinel    *SentinelClient
	pubsub      *PubSub
}
|  | ||||
| func (c *sentinelFailover) Close() error { | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
| 	if c.sentinel != nil { | ||||
| 		return c.closeSentinel() | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (c *sentinelFailover) closeSentinel() error { | ||||
| 	firstErr := c.pubsub.Close() | ||||
| 	c.pubsub = nil | ||||
|  | ||||
| 	err := c.sentinel.Close() | ||||
| 	if err != nil && firstErr == nil { | ||||
| 		firstErr = err | ||||
| 	} | ||||
| 	c.sentinel = nil | ||||
|  | ||||
| 	return firstErr | ||||
| } | ||||
|  | ||||
| func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) { | ||||
| 	addresses, err := c.slaveAddrs(ctx) | ||||
| 	if err != nil { | ||||
| 		return "", err | ||||
| 	} | ||||
| 	if len(addresses) == 0 { | ||||
| 		return c.MasterAddr(ctx) | ||||
| 	} | ||||
| 	return addresses[rand.Intn(len(addresses))], nil | ||||
| } | ||||
|  | ||||
// MasterAddr returns the current master address, asking the cached
// sentinel first and falling back to dialing each configured sentinel
// address until one answers.
func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
	// Fast path: query the cached sentinel under the read lock.
	c.mu.RLock()
	sentinel := c.sentinel
	c.mu.RUnlock()

	if sentinel != nil {
		addr := c.getMasterAddr(ctx, sentinel)
		if addr != "" {
			return addr, nil
		}
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	// Re-check under the write lock: another goroutine may have replaced
	// the sentinel while we were waiting.
	if c.sentinel != nil {
		addr := c.getMasterAddr(ctx, c.sentinel)
		if addr != "" {
			return addr, nil
		}
		// The cached sentinel is unusable; drop it before rediscovery.
		_ = c.closeSentinel()
	}

	for i, sentinelAddr := range c.sentinelAddrs {
		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))

		masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
		if err != nil {
			internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
				c.opt.MasterName, err)
			_ = sentinel.Close()
			continue
		}

		// Push working sentinel to the top.
		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
		c.setSentinel(ctx, sentinel)

		addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
		return addr, nil
	}

	return "", errors.New("redis: all sentinels specified in configuration are unreachable")
}
|  | ||||
// slaveAddrs returns the addresses of the usable slaves of the monitored
// master, using the same cached-sentinel-then-rediscover strategy as
// MasterAddr.
func (c *sentinelFailover) slaveAddrs(ctx context.Context) ([]string, error) {
	// Fast path: query the cached sentinel under the read lock.
	c.mu.RLock()
	sentinel := c.sentinel
	c.mu.RUnlock()

	if sentinel != nil {
		addrs := c.getSlaveAddrs(ctx, sentinel)
		if len(addrs) > 0 {
			return addrs, nil
		}
	}

	c.mu.Lock()
	defer c.mu.Unlock()

	// Re-check under the write lock before discarding the sentinel.
	if c.sentinel != nil {
		addrs := c.getSlaveAddrs(ctx, c.sentinel)
		if len(addrs) > 0 {
			return addrs, nil
		}
		_ = c.closeSentinel()
	}

	for i, sentinelAddr := range c.sentinelAddrs {
		sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))

		slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
		if err != nil {
			internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
				c.opt.MasterName, err)
			_ = sentinel.Close()
			continue
		}

		// Push working sentinel to the top.
		c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
		c.setSentinel(ctx, sentinel)

		addrs := parseSlaveAddrs(slaves)
		return addrs, nil
	}

	return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
}
|  | ||||
| func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string { | ||||
| 	addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result() | ||||
| 	if err != nil { | ||||
| 		internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s", | ||||
| 			c.opt.MasterName, err) | ||||
| 		return "" | ||||
| 	} | ||||
| 	return net.JoinHostPort(addr[0], addr[1]) | ||||
| } | ||||
|  | ||||
| func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string { | ||||
| 	addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result() | ||||
| 	if err != nil { | ||||
| 		internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s", | ||||
| 			c.opt.MasterName, err) | ||||
| 		return []string{} | ||||
| 	} | ||||
| 	return parseSlaveAddrs(addrs) | ||||
| } | ||||
|  | ||||
// parseSlaveAddrs converts the raw SENTINEL SLAVES reply into a list of
// host:port strings, skipping nodes whose flags mark them as down or
// disconnected.
func parseSlaveAddrs(addrs []interface{}) []string {
	nodes := make([]string, 0, len(addrs))

	for _, node := range addrs {
		var ip, port, lastkey string
		var flags []string
		isDown := false

		// The per-node reply is a flat key/value list; remember the
		// previous token to know which field the current one fills.
		for _, field := range node.([]interface{}) {
			val := field.(string)
			switch lastkey {
			case "ip":
				ip = val
			case "port":
				port = val
			case "flags":
				flags = strings.Split(val, ",")
			}
			lastkey = val
		}

		for _, flag := range flags {
			if flag == "s_down" || flag == "o_down" || flag == "disconnected" {
				isDown = true
			}
		}

		if !isDown {
			nodes = append(nodes, net.JoinHostPort(ip, port))
		}
	}

	return nodes
}
|  | ||||
| func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) { | ||||
| 	c.mu.RLock() | ||||
| 	currentAddr := c._masterAddr | ||||
| 	c.mu.RUnlock() | ||||
|  | ||||
| 	if addr == currentAddr { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	c.mu.Lock() | ||||
| 	defer c.mu.Unlock() | ||||
|  | ||||
| 	if addr == c._masterAddr { | ||||
| 		return | ||||
| 	} | ||||
| 	c._masterAddr = addr | ||||
|  | ||||
| 	internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q", | ||||
| 		c.opt.MasterName, addr) | ||||
| 	if c.onFailover != nil { | ||||
| 		c.onFailover(ctx, addr) | ||||
| 	} | ||||
| } | ||||
|  | ||||
// setSentinel installs sentinel as the cached client, discovers its peer
// sentinels, and starts listening for failover events. Callers must hold
// c.mu and must have closed any previous sentinel first.
func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
	if c.sentinel != nil {
		panic("not reached")
	}
	c.sentinel = sentinel
	c.discoverSentinels(ctx)

	// "+switch-master" announces a completed failover; "+slave-reconf-done"
	// is published when a replica finishes reconfiguring to the new master.
	c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
	go c.listen(c.pubsub)
}
|  | ||||
| func (c *sentinelFailover) discoverSentinels(ctx context.Context) { | ||||
| 	sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result() | ||||
| 	if err != nil { | ||||
| 		internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err) | ||||
| 		return | ||||
| 	} | ||||
| 	for _, sentinel := range sentinels { | ||||
| 		vals := sentinel.([]interface{}) | ||||
| 		for i := 0; i < len(vals); i += 2 { | ||||
| 			key := vals[i].(string) | ||||
| 			if key == "name" { | ||||
| 				sentinelAddr := vals[i+1].(string) | ||||
| 				if !contains(c.sentinelAddrs, sentinelAddr) { | ||||
| 					internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q", | ||||
| 						sentinelAddr, c.opt.MasterName) | ||||
| 					c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr) | ||||
| 				} | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *sentinelFailover) listen(pubsub *PubSub) { | ||||
| 	ctx := context.TODO() | ||||
|  | ||||
| 	if c.onUpdate != nil { | ||||
| 		c.onUpdate(ctx) | ||||
| 	} | ||||
|  | ||||
| 	ch := pubsub.Channel() | ||||
| 	for msg := range ch { | ||||
| 		if msg.Channel == "+switch-master" { | ||||
| 			parts := strings.Split(msg.Payload, " ") | ||||
| 			if parts[0] != c.opt.MasterName { | ||||
| 				internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0]) | ||||
| 				continue | ||||
| 			} | ||||
| 			addr := net.JoinHostPort(parts[3], parts[4]) | ||||
| 			c.trySwitchMaster(pubsub.getContext(), addr) | ||||
| 		} | ||||
|  | ||||
| 		if c.onUpdate != nil { | ||||
| 			c.onUpdate(ctx) | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
// contains reports whether str is an element of slice.
func contains(slice []string, str string) bool {
	for i := range slice {
		if slice[i] == str {
			return true
		}
	}
	return false
}
|  | ||||
| //------------------------------------------------------------------------------ | ||||
|  | ||||
// NewFailoverClusterClient returns a client that supports routing read-only commands
// to a slave node.
func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
	// Copy the sentinel addresses so reordering them later does not
	// mutate the caller's slice.
	sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
	copy(sentinelAddrs, failoverOpt.SentinelAddrs)

	failover := &sentinelFailover{
		opt:           failoverOpt,
		sentinelAddrs: sentinelAddrs,
	}

	opt := failoverOpt.clusterOptions()
	// Present the master/slave set as a single cluster slot range covering
	// the full keyspace, with the master first.
	opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
		masterAddr, err := failover.MasterAddr(ctx)
		if err != nil {
			return nil, err
		}

		nodes := []ClusterNode{{
			Addr: masterAddr,
		}}

		slaveAddrs, err := failover.slaveAddrs(ctx)
		if err != nil {
			return nil, err
		}

		for _, slaveAddr := range slaveAddrs {
			nodes = append(nodes, ClusterNode{
				Addr: slaveAddr,
			})
		}

		slots := []ClusterSlot{
			{
				Start: 0,
				End:   16383,
				Nodes: nodes,
			},
		}
		return slots, nil
	}

	c := NewClusterClient(opt)

	// Reload the cluster state on every sentinel notification so clients
	// pick up the new topology.
	failover.mu.Lock()
	failover.onUpdate = func(ctx context.Context) {
		c.ReloadState(ctx)
	}
	failover.mu.Unlock()

	return c
}
							
								
								
									
										148
									
								
								vendor/github.com/go-redis/redis/v8/tx.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										148
									
								
								vendor/github.com/go-redis/redis/v8/tx.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,148 @@ | ||||
| package redis | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
|  | ||||
| 	"github.com/go-redis/redis/v8/internal/pool" | ||||
| 	"github.com/go-redis/redis/v8/internal/proto" | ||||
| ) | ||||
|  | ||||
// TxFailedErr is returned when EXEC aborts because the transaction failed
// (e.g. a watched key was modified).
const TxFailedErr = proto.RedisError("redis: transaction failed")
|  | ||||
// Tx implements Redis transactions as described in
// http://redis.io/topics/transactions. It's NOT safe for concurrent use
// by multiple goroutines, because Exec resets list of watched keys.
// If you don't need WATCH it is better to use Pipeline.
type Tx struct {
	baseClient
	cmdable
	statefulCmdable
	hooks
	// ctx is returned by Context and handed to pipelines created from this Tx.
	ctx context.Context
}
|  | ||||
| func (c *Client) newTx(ctx context.Context) *Tx { | ||||
| 	tx := Tx{ | ||||
| 		baseClient: baseClient{ | ||||
| 			opt:      c.opt, | ||||
| 			connPool: pool.NewStickyConnPool(c.connPool), | ||||
| 		}, | ||||
| 		hooks: c.hooks.clone(), | ||||
| 		ctx:   ctx, | ||||
| 	} | ||||
| 	tx.init() | ||||
| 	return &tx | ||||
| } | ||||
|  | ||||
// init wires both the generic and the stateful command interfaces to Process.
func (c *Tx) init() {
	c.cmdable = c.Process
	c.statefulCmdable = c.Process
}
|  | ||||
// Context returns the context the transaction was created or cloned with.
func (c *Tx) Context() context.Context {
	return c.ctx
}
|  | ||||
| func (c *Tx) WithContext(ctx context.Context) *Tx { | ||||
| 	if ctx == nil { | ||||
| 		panic("nil context") | ||||
| 	} | ||||
| 	clone := *c | ||||
| 	clone.init() | ||||
| 	clone.hooks.lock() | ||||
| 	clone.ctx = ctx | ||||
| 	return &clone | ||||
| } | ||||
|  | ||||
// Process runs cmd through the hook chain and the underlying connection.
func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
	return c.hooks.process(ctx, cmd, c.baseClient.process)
}
|  | ||||
// Watch prepares a transaction and marks the keys to be watched
// for conditional execution if there are any keys.
//
// The transaction is automatically closed when fn exits.
func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
	tx := c.newTx(ctx)
	// Close issues UNWATCH and releases the sticky connection.
	defer tx.Close(ctx)
	if len(keys) > 0 {
		if err := tx.Watch(ctx, keys...).Err(); err != nil {
			return err
		}
	}
	return fn(tx)
}
|  | ||||
// Close closes the transaction, releasing any open resources.
func (c *Tx) Close(ctx context.Context) error {
	// Best-effort UNWATCH so the sticky connection is returned clean.
	_ = c.Unwatch(ctx).Err()
	return c.baseClient.Close()
}
|  | ||||
| // Watch marks the keys to be watched for conditional execution | ||||
| // of a transaction. | ||||
| func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd { | ||||
| 	args := make([]interface{}, 1+len(keys)) | ||||
| 	args[0] = "watch" | ||||
| 	for i, key := range keys { | ||||
| 		args[1+i] = key | ||||
| 	} | ||||
| 	cmd := NewStatusCmd(ctx, args...) | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // Unwatch flushes all the previously watched keys for a transaction. | ||||
| func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd { | ||||
| 	args := make([]interface{}, 1+len(keys)) | ||||
| 	args[0] = "unwatch" | ||||
| 	for i, key := range keys { | ||||
| 		args[1+i] = key | ||||
| 	} | ||||
| 	cmd := NewStatusCmd(ctx, args...) | ||||
| 	_ = c.Process(ctx, cmd) | ||||
| 	return cmd | ||||
| } | ||||
|  | ||||
| // Pipeline creates a pipeline. Usually it is more convenient to use Pipelined. | ||||
| func (c *Tx) Pipeline() Pipeliner { | ||||
| 	pipe := Pipeline{ | ||||
| 		ctx: c.ctx, | ||||
| 		exec: func(ctx context.Context, cmds []Cmder) error { | ||||
| 			return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline) | ||||
| 		}, | ||||
| 	} | ||||
| 	pipe.init() | ||||
| 	return &pipe | ||||
| } | ||||
|  | ||||
// Pipelined executes commands queued in the fn outside of the transaction.
// Use TxPipelined if you need transactional behavior.
func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.Pipeline().Pipelined(ctx, fn)
}
|  | ||||
// TxPipelined executes commands queued in the fn in the transaction.
//
// When using WATCH, EXEC will execute commands only if the watched keys
// were not modified, allowing for a check-and-set mechanism.
//
// Exec always returns list of commands. If transaction fails
// TxFailedErr is returned. Otherwise Exec returns an error of the first
// failed command or nil.
func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
	return c.TxPipeline().Pipelined(ctx, fn)
}
|  | ||||
| // TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined. | ||||
| func (c *Tx) TxPipeline() Pipeliner { | ||||
| 	pipe := Pipeline{ | ||||
| 		ctx: c.ctx, | ||||
| 		exec: func(ctx context.Context, cmds []Cmder) error { | ||||
| 			return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline) | ||||
| 		}, | ||||
| 	} | ||||
| 	pipe.init() | ||||
| 	return &pipe | ||||
| } | ||||
							
								
								
									
										206
									
								
								vendor/github.com/go-redis/redis/v8/universal.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							
							
						
						
									
										206
									
								
								vendor/github.com/go-redis/redis/v8/universal.go
									
									
									
										generated
									
									
										vendored
									
									
										Normal file
									
								
							| @@ -0,0 +1,206 @@ | ||||
| package redis | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"crypto/tls" | ||||
| 	"net" | ||||
| 	"time" | ||||
| ) | ||||
|  | ||||
// UniversalOptions information is required by UniversalClient to establish
// connections.
type UniversalOptions struct {
	// Either a single address or a seed list of host:port addresses
	// of cluster/sentinel nodes.
	Addrs []string

	// Database to be selected after connecting to the server.
	// Only single-node and failover clients.
	DB int

	// Common options.

	Dialer    func(ctx context.Context, network, addr string) (net.Conn, error)
	OnConnect func(ctx context.Context, cn *Conn) error

	Username         string
	Password         string
	SentinelPassword string

	MaxRetries      int
	MinRetryBackoff time.Duration
	MaxRetryBackoff time.Duration

	DialTimeout  time.Duration
	ReadTimeout  time.Duration
	WriteTimeout time.Duration

	PoolSize           int
	MinIdleConns       int
	MaxConnAge         time.Duration
	PoolTimeout        time.Duration
	IdleTimeout        time.Duration
	IdleCheckFrequency time.Duration

	TLSConfig *tls.Config

	// Only cluster clients.

	MaxRedirects   int
	ReadOnly       bool
	RouteByLatency bool
	RouteRandomly  bool

	// The sentinel master name.
	// Only failover clients.
	MasterName string
}
|  | ||||
// Cluster returns cluster options created from the universal options.
// Defaults Addrs to localhost when empty; sentinel-only fields
// (MasterName, SentinelPassword, DB) are not carried over.
func (o *UniversalOptions) Cluster() *ClusterOptions {
	if len(o.Addrs) == 0 {
		o.Addrs = []string{"127.0.0.1:6379"}
	}

	return &ClusterOptions{
		Addrs:     o.Addrs,
		Dialer:    o.Dialer,
		OnConnect: o.OnConnect,

		Username: o.Username,
		Password: o.Password,

		MaxRedirects:   o.MaxRedirects,
		ReadOnly:       o.ReadOnly,
		RouteByLatency: o.RouteByLatency,
		RouteRandomly:  o.RouteRandomly,

		MaxRetries:      o.MaxRetries,
		MinRetryBackoff: o.MinRetryBackoff,
		MaxRetryBackoff: o.MaxRetryBackoff,

		DialTimeout:        o.DialTimeout,
		ReadTimeout:        o.ReadTimeout,
		WriteTimeout:       o.WriteTimeout,
		PoolSize:           o.PoolSize,
		MinIdleConns:       o.MinIdleConns,
		MaxConnAge:         o.MaxConnAge,
		PoolTimeout:        o.PoolTimeout,
		IdleTimeout:        o.IdleTimeout,
		IdleCheckFrequency: o.IdleCheckFrequency,

		TLSConfig: o.TLSConfig,
	}
}
|  | ||||
// Failover returns failover options created from the universal options.
// Addrs are treated as sentinel addresses and default to the standard
// sentinel port on localhost when empty.
func (o *UniversalOptions) Failover() *FailoverOptions {
	if len(o.Addrs) == 0 {
		o.Addrs = []string{"127.0.0.1:26379"}
	}

	return &FailoverOptions{
		SentinelAddrs: o.Addrs,
		MasterName:    o.MasterName,

		Dialer:    o.Dialer,
		OnConnect: o.OnConnect,

		DB:               o.DB,
		Username:         o.Username,
		Password:         o.Password,
		SentinelPassword: o.SentinelPassword,

		MaxRetries:      o.MaxRetries,
		MinRetryBackoff: o.MinRetryBackoff,
		MaxRetryBackoff: o.MaxRetryBackoff,

		DialTimeout:  o.DialTimeout,
		ReadTimeout:  o.ReadTimeout,
		WriteTimeout: o.WriteTimeout,

		PoolSize:           o.PoolSize,
		MinIdleConns:       o.MinIdleConns,
		MaxConnAge:         o.MaxConnAge,
		PoolTimeout:        o.PoolTimeout,
		IdleTimeout:        o.IdleTimeout,
		IdleCheckFrequency: o.IdleCheckFrequency,

		TLSConfig: o.TLSConfig,
	}
}
|  | ||||
// Simple returns basic options created from the universal options.
// Only the first address is used, defaulting to localhost when none is set;
// cluster- and sentinel-specific fields are not carried over.
func (o *UniversalOptions) Simple() *Options {
	addr := "127.0.0.1:6379"
	if len(o.Addrs) > 0 {
		addr = o.Addrs[0]
	}

	return &Options{
		Addr:      addr,
		Dialer:    o.Dialer,
		OnConnect: o.OnConnect,

		DB:       o.DB,
		Username: o.Username,
		Password: o.Password,

		MaxRetries:      o.MaxRetries,
		MinRetryBackoff: o.MinRetryBackoff,
		MaxRetryBackoff: o.MaxRetryBackoff,

		DialTimeout:  o.DialTimeout,
		ReadTimeout:  o.ReadTimeout,
		WriteTimeout: o.WriteTimeout,

		PoolSize:           o.PoolSize,
		MinIdleConns:       o.MinIdleConns,
		MaxConnAge:         o.MaxConnAge,
		PoolTimeout:        o.PoolTimeout,
		IdleTimeout:        o.IdleTimeout,
		IdleCheckFrequency: o.IdleCheckFrequency,

		TLSConfig: o.TLSConfig,
	}
}
|  | ||||
| // -------------------------------------------------------------------- | ||||
|  | ||||
// UniversalClient is an abstract client which - based on the provided options -
// can connect to either clusters, or sentinel-backed failover instances
// or simple single-instance servers. This can be useful for testing
// cluster-specific applications locally.
type UniversalClient interface {
	Cmdable
	Context() context.Context
	AddHook(Hook)
	Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
	Do(ctx context.Context, args ...interface{}) *Cmd
	Process(ctx context.Context, cmd Cmder) error
	Subscribe(ctx context.Context, channels ...string) *PubSub
	PSubscribe(ctx context.Context, channels ...string) *PubSub
	Close() error
	PoolStats() *PoolStats
}
|  | ||||
// Compile-time checks that all client kinds satisfy UniversalClient.
var (
	_ UniversalClient = (*Client)(nil)
	_ UniversalClient = (*ClusterClient)(nil)
	_ UniversalClient = (*Ring)(nil)
)
|  | ||||
| // NewUniversalClient returns a new multi client. The type of client returned depends | ||||
| // on the following three conditions: | ||||
| // | ||||
| // 1. if a MasterName is passed a sentinel-backed FailoverClient will be returned | ||||
| // 2. if the number of Addrs is two or more, a ClusterClient will be returned | ||||
| // 3. otherwise, a single-node redis Client will be returned. | ||||
| func NewUniversalClient(opts *UniversalOptions) UniversalClient { | ||||
| 	if opts.MasterName != "" { | ||||
| 		return NewFailoverClient(opts.Failover()) | ||||
| 	} else if len(opts.Addrs) > 1 { | ||||
| 		return NewClusterClient(opts.Cluster()) | ||||
| 	} | ||||
| 	return NewClient(opts.Simple()) | ||||
| } | ||||
		Reference in New Issue
	
	Block a user